python_code | repo_name | file_path
---|---|---|
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph Data Structures."""
from typing import Any, NamedTuple, Iterable, Mapping, Union, Optional
import jax.numpy as jnp
# As of 04/2020 pytype doesn't support recursive types.
# pytype: disable=not-supported-yet
ArrayTree = Union[jnp.ndarray, Iterable['ArrayTree'], Mapping[Any, 'ArrayTree']]
class GraphsTuple(NamedTuple):
"""An ordered collection of graphs in a sparse format.
The values of ``nodes``, ``edges`` and ``globals`` can be ``ArrayTrees`` -
nests of features with ``jax`` compatible values. For example, ``nodes`` in a
graph may have more than one type of attribute.
However, the GraphsTuple typically takes the following form for a batch of
`n` graphs:
- n_node: The number of nodes per graph. It is a vector of integers with shape
`[n_graphs]`, such that ``graph.n_node[i]`` is the number of nodes in the
i-th graph.
- n_edge: The number of edges per graph. It is a vector of integers with shape
`[n_graphs]`, such that ``graph.n_edge[i]`` is the number of edges in the
i-th graph.
- nodes: The nodes features. It is either ``None`` (the graph has no node
features), or a vector of shape `[n_nodes] + node_shape`, where
``n_nodes = sum(graph.n_node)`` is the total number of nodes in the batch of
graphs, and `node_shape` represents the shape of the features of each node.
The relative index of a node from the batched version can be recovered from
the ``graph.n_node`` property. For instance, the second node of the third
graph will have its features in the
`1 + graph.n_node[0] + graph.n_node[1]`-th slot of graph.nodes.
Observe that having a ``None`` value for this field does not mean that the
graphs have no nodes, only that they do not have node features.
- edges: The edges features. It is either ``None`` (the graph has no edge
features), or a vector of shape `[n_edges] + edge_shape`, where
``n_edges = sum(graph.n_edge)`` is the total number of edges in the batch of
graphs, and ``edge_shape`` represents the shape of the features of each
edge.
The relative index of an edge from the batched version can be recovered from
the ``graph.n_edge`` property. For instance, the third edge of the third
graph will have its features in the `2 + graph.n_edge[0] + graph.n_edge[1]`-
th slot of graph.edges.
Having a ``None`` value for this field does not necessarily mean that the
graph has no edges, only that they do not have edge features.
- receivers: The indices of the receiver nodes, for each edge. It is either
``None`` (if the graph has no edges), or a vector of integers of shape
`[n_edges]`, such that ``graph.receivers[i]`` is the index of the node
receiving from the i-th edge.
Observe that the index is absolute (in other words, cumulative), i.e.
``graphs.receivers`` takes values in `[0, n_nodes)`. For instance, an edge
connecting the vertices with relative indices 2 and 3 in the second graph of
the batch would have a ``receivers`` value of `3 + graph.n_node[0]`.
If `graphs.receivers` is ``None``, then ``graphs.edges`` and
``graphs.senders`` should also be ``None``.
- senders: The indices of the sender nodes, for each edge. It is either
``None`` (if the graph has no edges), or a vector of integers of shape
`[n_edges]`, such that ``graph.senders[i]`` is the index of the node
sending from the i-th edge.
Observe that the index is absolute, i.e. ``graphs.senders`` takes values in
`[0, n_nodes)`. For instance, an edge connecting the vertices with relative
indices 1 and 3 in the third graph of the batch would have a ``senders``
value of `1 + graph.n_node[0] + graph.n_node[1]`.
If ``graphs.senders`` is ``None``, then ``graphs.edges`` and
``graphs.receivers`` should also be ``None``.
- globals: The global features of the graph. It is either ``None`` (the graph
has no global features), or a vector of shape `[n_graphs] + global_shape`
representing graph level features.
"""
nodes: Optional[ArrayTree]
edges: Optional[ArrayTree]
receivers: Optional[jnp.ndarray] # with integer dtype
senders: Optional[jnp.ndarray] # with integer dtype
globals: Optional[ArrayTree]
n_node: jnp.ndarray # with integer dtype
n_edge: jnp.ndarray # with integer dtype
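# Illustrative construction sketch (not part of the original source): a single
# graph with 3 nodes, 2 edges and one global feature vector, using the `jnp`
# import above.
#
#   graph = GraphsTuple(
#       nodes=jnp.ones((3, 4)),       # 3 nodes with 4 features each
#       edges=jnp.ones((2, 5)),       # 2 edges with 5 features each
#       senders=jnp.array([0, 1]),    # edge i goes from senders[i] ...
#       receivers=jnp.array([1, 2]),  # ... to receivers[i]
#       globals=jnp.ones((1, 6)),     # 1 graph with 6 global features
#       n_node=jnp.array([3]),
#       n_edge=jnp.array([2]))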
| jraph-master | jraph/_src/graph.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for working with `GraphsTuple`s."""
import functools
from typing import Any, Callable, Generator, Iterable, Iterator, List, Mapping, Optional, Sequence, Union
import jax
from jax import lax
import jax.numpy as jnp
import jax.tree_util as tree
from jraph._src import graph as gn_graph
import numpy as np
# As of 04/2020 pytype doesn't support recursive types.
# pytype: disable=not-supported-yet
ArrayTree = Union[jnp.ndarray, Iterable['ArrayTree'], Mapping[Any, 'ArrayTree']]
def segment_sum(data: jnp.ndarray,
segment_ids: jnp.ndarray,
num_segments: Optional[int] = None,
indices_are_sorted: bool = False,
unique_indices: bool = False):
"""Computes the sum within segments of an array.
Jraph alias to `jax.ops.segment_sum
<https://jax.readthedocs.io/en/latest/_autosummary/jax.ops.segment_sum.html>`_.
Note that other segment operations in jraph are not aliases, but are rather
defined inside the package. Similar to TensorFlow's `segment_sum
<https://www.tensorflow.org/api_docs/python/tf/math/segment_sum>`_.
Args:
data: an array with the values to be summed.
segment_ids: an array with integer dtype that indicates the segments of
`data` (along its leading axis) to be summed. Values can be repeated and
need not be sorted. Values outside of the range [0, num_segments) are
dropped and do not contribute to the sum.
num_segments: optional, an int with nonnegative value indicating the number
of segments. The default is set to be the minimum number of segments that
would support all indices in ``segment_ids``, calculated as
``max(segment_ids) + 1``. Since `num_segments` determines the size of the
output, a static value must be provided to use ``segment_sum`` in a
``jit``-compiled function.
indices_are_sorted: whether ``segment_ids`` is known to be sorted.
unique_indices: whether ``segment_ids`` is known to be free of duplicates.
Returns:
An array with shape :code:`(num_segments,) + data.shape[1:]` representing
the segment sums.
Examples:
Simple 1D segment sum:
>>> data = jnp.arange(5)
>>> segment_ids = jnp.array([0, 0, 1, 1, 2])
>>> segment_sum(data, segment_ids)
DeviceArray([1, 5, 4], dtype=int32)
Using JIT requires static `num_segments`:
>>> from jax import jit
>>> jit(segment_sum, static_argnums=2)(data, segment_ids, 3)
DeviceArray([1, 5, 4], dtype=int32)
"""
return jax.ops.segment_sum(
data=data,
segment_ids=segment_ids,
num_segments=num_segments,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
def segment_mean(data: jnp.ndarray,
segment_ids: jnp.ndarray,
num_segments: Optional[int] = None,
indices_are_sorted: bool = False,
unique_indices: bool = False):
"""Returns mean for each segment.
Args:
data: the values which are averaged segment-wise.
segment_ids: indices for the segments.
num_segments: total number of segments.
indices_are_sorted: whether ``segment_ids`` is known to be sorted.
unique_indices: whether ``segment_ids`` is known to be free of duplicates.
"""
numerator = segment_sum(
data,
segment_ids,
num_segments,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
denominator = segment_sum(
jnp.ones_like(data),
segment_ids,
num_segments,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
return numerator / jnp.maximum(denominator,
jnp.ones(shape=[], dtype=denominator.dtype))
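# Illustrative usage sketch (not part of the original source):
#
#   data = jnp.array([1.0, 2.0, 3.0, 4.0])
#   segment_ids = jnp.array([0, 0, 1, 1])
#   segment_mean(data, segment_ids, num_segments=2)
#   # -> [1.5, 3.5]  (per-segment means)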
def segment_variance(data: jnp.ndarray,
segment_ids: jnp.ndarray,
num_segments: Optional[int] = None,
indices_are_sorted: bool = False,
unique_indices: bool = False):
"""Returns the variance for each segment.
Args:
data: values whose variance will be calculated segment-wise.
segment_ids: indices for segments
num_segments: total number of segments.
indices_are_sorted: whether ``segment_ids`` is known to be sorted.
unique_indices: whether ``segment_ids`` is known to be free of duplicates.
Returns:
num_segments size array containing the variance of each segment.
"""
means = segment_mean(
data,
segment_ids,
num_segments,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)[segment_ids]
counts = segment_sum(
jnp.ones_like(data),
segment_ids,
num_segments,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
counts = jnp.maximum(counts, jnp.ones_like(counts))
variances = segment_sum(
jnp.power(data - means, 2),
segment_ids,
num_segments,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices) / counts
return variances
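# Illustrative usage sketch (not part of the original source):
#
#   data = jnp.array([1.0, 3.0, 2.0, 6.0])
#   segment_ids = jnp.array([0, 0, 1, 1])
#   segment_variance(data, segment_ids, num_segments=2)
#   # -> [1., 4.]  (per-segment population variances)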
def segment_normalize(data: jnp.ndarray,
segment_ids: jnp.ndarray,
num_segments: Optional[int] = None,
indices_are_sorted: bool = False,
unique_indices: bool = False,
eps=1e-8):
"""Normalizes data within each segment.
Args:
data: values whose z-score normalized values will be calculated
segment-wise.
segment_ids: indices for segments.
num_segments: total number of segments.
indices_are_sorted: whether ``segment_ids`` is known to be sorted.
unique_indices: whether ``segment_ids`` is known to be free of duplicates.
eps: epsilon for numerical stability.
Returns:
array containing data normalized segment-wise.
"""
means = segment_mean(
data,
segment_ids,
num_segments,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)[segment_ids]
variances = segment_variance(
data,
segment_ids,
num_segments,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)[segment_ids]
normalized = (data - means) * lax.rsqrt(jnp.maximum(
variances, jnp.array(eps, dtype=variances.dtype)))
return normalized
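# Illustrative usage sketch (not part of the original source): z-score
# normalization within each segment.
#
#   data = jnp.array([1.0, 3.0, 2.0, 4.0])
#   segment_ids = jnp.array([0, 0, 1, 1])
#   segment_normalize(data, segment_ids, num_segments=2)
#   # -> approximately [-1., 1., -1., 1.]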
def segment_max(data: jnp.ndarray,
segment_ids: jnp.ndarray,
num_segments: Optional[int] = None,
indices_are_sorted: bool = False,
unique_indices: bool = False):
"""Alias for jax.ops.segment_max.
Args:
data: an array with the values to be maxed over.
segment_ids: an array with integer dtype that indicates the segments of
`data` (along its leading axis) to be maxed over. Values can be repeated
and need not be sorted. Values outside of the range [0, num_segments) are
dropped and do not contribute to the result.
num_segments: optional, an int with positive value indicating the number of
segments. The default is ``jnp.maximum(jnp.max(segment_ids) + 1,
jnp.max(-segment_ids))`` but since `num_segments` determines the size of
the output, a static value must be provided to use ``segment_max`` in a
``jit``-compiled function.
indices_are_sorted: whether ``segment_ids`` is known to be sorted
unique_indices: whether ``segment_ids`` is known to be free of duplicates
Returns:
An array with shape ``(num_segments,) + data.shape[1:]`` representing
the segment maxs.
"""
return jax.ops.segment_max(data, segment_ids, num_segments,
indices_are_sorted, unique_indices)
def _replace_empty_segments_with_constant(aggregated_segments: jnp.ndarray,
segment_ids: jnp.ndarray,
num_segments: Optional[int] = None,
constant: float = 0.):
"""Replaces the values of empty segments with constants."""
result_shape = (len(segment_ids),) + aggregated_segments.shape[1:]
num_elements_in_segment = segment_sum(
jnp.ones(result_shape, dtype=jnp.int32),
segment_ids,
num_segments=num_segments)
return jnp.where(num_elements_in_segment > 0, aggregated_segments,
jnp.array(constant, dtype=aggregated_segments.dtype))
def segment_min_or_constant(data: jnp.ndarray,
segment_ids: jnp.ndarray,
num_segments: Optional[int] = None,
indices_are_sorted: bool = False,
unique_indices: bool = False,
constant: float = 0.):
"""As segment_min, but returns a constant for empty segments.
`segment_min` returns `inf` for empty segments, which can cause `nan`s in the
backwards pass of a neural network, even with masking. This method overrides
the default behaviour of `segment_min` and returns a constant for empty
segments.
Args:
data: an array with the values to be min'd over.
segment_ids: an array with integer dtype that indicates the segments of
`data` (along its leading axis) to be min'd over. Values can be repeated
and need not be sorted. Values outside of the range [0, num_segments) are
dropped and do not contribute to the result.
num_segments: optional, an int with positive value indicating the number of
segments. The default is ``jnp.maximum(jnp.max(segment_ids) + 1,
jnp.max(-segment_ids))`` but since `num_segments` determines the size of
the output, a static value must be provided to use ``segment_min`` in a
``jit``-compiled function.
indices_are_sorted: whether ``segment_ids`` is known to be sorted
unique_indices: whether ``segment_ids`` is known to be free of duplicates
constant: The constant to replace empty segments with, defaults to zero.
Returns:
An array with shape ``(num_segments,) + data.shape[1:]`` representing
the segment mins.
"""
mins_ = segment_min(data, segment_ids, num_segments, indices_are_sorted,
unique_indices)
return _replace_empty_segments_with_constant(mins_, segment_ids, num_segments,
constant)
def segment_max_or_constant(data: jnp.ndarray,
segment_ids: jnp.ndarray,
num_segments: Optional[int] = None,
indices_are_sorted: bool = False,
unique_indices: bool = False,
constant: float = 0.):
"""As segment_max, but returns a constant for empty segments.
`segment_max` returns `-inf` for empty segments, which can cause `nan`s in the
backwards pass of a neural network, even with masking. This method overrides
the default behaviour of `segment_max` and returns a constant for empty
segments.
Args:
data: an array with the values to be maxed over.
segment_ids: an array with integer dtype that indicates the segments of
`data` (along its leading axis) to be maxed over. Values can be repeated
and need not be sorted. Values outside of the range [0, num_segments) are
dropped and do not contribute to the result.
num_segments: optional, an int with positive value indicating the number of
segments. The default is ``jnp.maximum(jnp.max(segment_ids) + 1,
jnp.max(-segment_ids))`` but since `num_segments` determines the size of
the output, a static value must be provided to use ``segment_max`` in a
``jit``-compiled function.
indices_are_sorted: whether ``segment_ids`` is known to be sorted
unique_indices: whether ``segment_ids`` is known to be free of duplicates
constant: The constant to replace empty segments with, defaults to zero.
Returns:
An array with shape ``(num_segments,) + data.shape[1:]`` representing
the segment maxs.
"""
maxs_ = segment_max(data, segment_ids, num_segments, indices_are_sorted,
unique_indices)
return _replace_empty_segments_with_constant(maxs_, segment_ids, num_segments,
constant)
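# Illustrative usage sketch (not part of the original source): segment 1 is
# empty, so it receives the constant instead of an infinite value.
#
#   data = jnp.array([1.0, 2.0, 3.0])
#   segment_ids = jnp.array([0, 0, 2])
#   segment_max_or_constant(data, segment_ids, num_segments=3)
#   # -> [2., 0., 3.]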
def segment_min(data: jnp.ndarray,
segment_ids: jnp.ndarray,
num_segments: Optional[int] = None,
indices_are_sorted: bool = False,
unique_indices: bool = False):
"""Computes the min within segments of an array.
Similar to TensorFlow's segment_min:
https://www.tensorflow.org/api_docs/python/tf/math/segment_min
Args:
data: an array with the values to be min'd over.
segment_ids: an array with integer dtype that indicates the segments of
`data` (along its leading axis) to be min'd over. Values can be repeated
and need not be sorted. Values outside of the range [0, num_segments) are
dropped and do not contribute to the result.
num_segments: optional, an int with positive value indicating the number of
segments. The default is ``jnp.maximum(jnp.max(segment_ids) + 1,
jnp.max(-segment_ids))`` but since `num_segments` determines the size of
the output, a static value must be provided to use ``segment_min`` in a
``jit``-compiled function.
indices_are_sorted: whether ``segment_ids`` is known to be sorted
unique_indices: whether ``segment_ids`` is known to be free of duplicates
Returns:
An array with shape ``(num_segments,) + data.shape[1:]`` representing
the segment mins.
"""
return jax.ops.segment_min(data, segment_ids, num_segments,
indices_are_sorted, unique_indices)
def segment_softmax(logits: jnp.ndarray,
segment_ids: jnp.ndarray,
num_segments: Optional[int] = None,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> ArrayTree:
"""Computes a segment-wise softmax.
For a given tree of logits that can be divided into segments, computes a
softmax over the segments.
logits = jnp.array([1.0, 2.0, 3.0, 1.0, 2.0])
segment_ids = jnp.array([0, 0, 0, 1, 1])
segment_softmax(logits, segment_ids)
>> DeviceArray([0.09003057, 0.24472848, 0.66524094, 0.26894142, 0.7310586],
>> dtype=float32)
Args:
logits: an array of logits to be segment softmaxed.
segment_ids: an array with integer dtype that indicates the segments of
``logits`` (along its leading axis) to be softmax'd over. Values can be repeated
and need not be sorted. Values outside of the range [0, num_segments) are
dropped and do not contribute to the result.
num_segments: optional, an int with positive value indicating the number of
segments. The default is ``jnp.maximum(jnp.max(segment_ids) + 1,
jnp.max(-segment_ids))`` but since ``num_segments`` determines the size of
the output, a static value must be provided to use ``segment_softmax`` in a
``jit``-compiled function.
indices_are_sorted: whether ``segment_ids`` is known to be sorted
unique_indices: whether ``segment_ids`` is known to be free of duplicates
Returns:
The segment softmax-ed ``logits``.
"""
# First, subtract the segment max for numerical stability
maxs = segment_max(logits, segment_ids, num_segments, indices_are_sorted,
unique_indices)
logits = logits - maxs[segment_ids]
# Then take the exp
logits = jnp.exp(logits)
# Then calculate the normalizers
normalizers = segment_sum(logits, segment_ids, num_segments,
indices_are_sorted, unique_indices)
normalizers = normalizers[segment_ids]
softmax = logits / normalizers
return softmax
def partition_softmax(logits: ArrayTree,
partitions: jnp.ndarray,
sum_partitions: Optional[int] = None):
"""Compute a softmax within partitions of an array.
For example::
logits = jnp.array([1.0, 2.0, 3.0, 1.0, 2.0])
partitions = jnp.array([3, 2])
partition_softmax(logits, partitions)
>> DeviceArray(
>> [0.09003057, 0.24472848, 0.66524094, 0.26894142, 0.7310586],
>> dtype=float32)
Args:
logits: the logits for the softmax.
partitions: the number of nodes per graph. It is a vector of integers with
shape ``[n_graphs]``, such that ``graph.n_node[i]`` is the number of nodes
in the i-th graph.
sum_partitions: the sum of n_node. If not passed, the result of this method
is data dependent and so not ``jit``-able.
Returns:
The softmax over partitions.
"""
n_partitions = len(partitions)
segment_ids = jnp.repeat(
jnp.arange(n_partitions),
partitions,
axis=0,
total_repeat_length=sum_partitions)
return segment_softmax(
logits, segment_ids, n_partitions, indices_are_sorted=True)
def batch(graphs: Sequence[gn_graph.GraphsTuple]) -> gn_graph.GraphsTuple:
"""Returns a batched graph given a list of graphs.
This method will concatenate the ``nodes``, ``edges`` and ``globals``,
``n_node`` and ``n_edge`` of a sequence of ``GraphsTuple`` along axis 0. For
``senders`` and ``receivers``, offsets are computed so that connectivity
remains valid for the new node indices.
For example::
key = jax.random.PRNGKey(0)
graph_1 = GraphsTuple(nodes=jax.random.normal(key, (3, 64)),
edges=jax.random.normal(key, (5, 64)),
senders=jnp.array([0,0,1,1,2]),
receivers=jnp.array([1,2,0,2,1]),
n_node=jnp.array([3]),
n_edge=jnp.array([5]),
globals=jax.random.normal(key, (1, 64)))
graph_2 = GraphsTuple(nodes=jax.random.normal(key, (5, 64)),
edges=jax.random.normal(key, (10, 64)),
senders=jnp.array([0,0,1,1,2,2,3,3,4,4]),
receivers=jnp.array([1,2,0,2,1,0,2,1,3,2]),
n_node=jnp.array([5]),
n_edge=jnp.array([10]),
globals=jax.random.normal(key, (1, 64)))
batch = jraph.batch([graph_1, graph_2])
batch.nodes.shape
>> (8, 64)
batch.edges.shape
>> (15, 64)
# Offsets computed on senders and receivers
batch.senders
>> DeviceArray([0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7], dtype=int32)
batch.receivers
>> DeviceArray([1, 2, 0, 2, 1, 4, 5, 3, 5, 4, 3, 5, 4, 6, 5], dtype=int32)
batch.n_node
>> DeviceArray([3, 5], dtype=int32)
batch.n_edge
>> DeviceArray([5, 10], dtype=int32)
If a ``GraphsTuple`` does not contain any graphs, it will be dropped from the
batch.
This method is not compilable because it is data dependent.
This functionality was implemented as ``utils_tf.concat`` in the
TensorFlow version of graph_nets.
Args:
graphs: sequence of ``GraphsTuple``s which will be batched into a single
graph.
"""
return _batch(graphs, np_=jnp)
def batch_np(graphs: Sequence[gn_graph.GraphsTuple]) -> gn_graph.GraphsTuple:
"""Numpy implementation of `batch`. See `batch` for more details."""
return _batch(graphs, np_=np)
def _batch(graphs, np_):
"""Returns batched graph given a list of graphs and a numpy-like module."""
# Calculates offsets for sender and receiver arrays, caused by concatenating
# the nodes arrays.
offsets = np_.cumsum(
np_.array([0] + [np_.sum(g.n_node) for g in graphs[:-1]]))
def _map_concat(nests):
concat = lambda *args: np_.concatenate(args)
return tree.tree_map(concat, *nests)
return gn_graph.GraphsTuple(
n_node=np_.concatenate([g.n_node for g in graphs]),
n_edge=np_.concatenate([g.n_edge for g in graphs]),
nodes=_map_concat([g.nodes for g in graphs]),
edges=_map_concat([g.edges for g in graphs]),
globals=_map_concat([g.globals for g in graphs]),
senders=np_.concatenate([g.senders + o for g, o in zip(graphs, offsets)]),
receivers=np_.concatenate(
[g.receivers + o for g, o in zip(graphs, offsets)]))
def unbatch(graph: gn_graph.GraphsTuple) -> List[gn_graph.GraphsTuple]:
"""Returns a list of graphs given a batched graph.
This function does not support jax.jit, because the shape of the output
is data-dependent!
Args:
graph: the batched graph, which will be unbatched into a list of graphs.
"""
return _unbatch(graph, np_=jnp)
def unbatch_np(graph: gn_graph.GraphsTuple) -> List[gn_graph.GraphsTuple]:
"""Numpy implementation of `unbatch`. See `unbatch` for more details."""
return _unbatch(graph, np_=np)
def _unbatch(graph: gn_graph.GraphsTuple, np_) -> List[gn_graph.GraphsTuple]:
"""Returns a list of graphs given a batched graph."""
def _map_split(nest, indices_or_sections):
"""Splits leaf nodes of nests and returns a list of nests."""
if isinstance(indices_or_sections, int):
n_lists = indices_or_sections
else:
n_lists = len(indices_or_sections) + 1
split_fn = lambda field: np_.split(field, indices_or_sections)
nest_of_lists = tree.tree_map(split_fn, nest)
# pylint: disable=cell-var-from-loop
list_of_nests = [
tree.tree_map(lambda _, x: x[i], nest, nest_of_lists)
for i in range(n_lists)
]
return list_of_nests
all_n_node = graph.n_node[:, None]
all_n_edge = graph.n_edge[:, None]
node_offsets = np_.cumsum(graph.n_node[:-1])
all_nodes = _map_split(graph.nodes, node_offsets)
edge_offsets = np_.cumsum(graph.n_edge[:-1])
all_edges = _map_split(graph.edges, edge_offsets)
all_globals = _map_split(graph.globals, len(graph.n_node))
all_senders = np_.split(graph.senders, edge_offsets)
all_receivers = np_.split(graph.receivers, edge_offsets)
# Corrects offset in the sender and receiver arrays, caused by splitting the
# nodes array.
n_graphs = graph.n_node.shape[0]
for graph_index in np_.arange(n_graphs)[1:]:
all_senders[graph_index] -= node_offsets[graph_index - 1]
all_receivers[graph_index] -= node_offsets[graph_index - 1]
return [
gn_graph.GraphsTuple._make(elements)
for elements in zip(all_nodes, all_edges, all_receivers, all_senders,
all_globals, all_n_node, all_n_edge)
]
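# Illustrative round-trip sketch (not part of the original source): for any
# two GraphsTuples `graph_1` and `graph_2` (placeholders),
#
#   recovered = unbatch(batch([graph_1, graph_2]))
#
# returns a list of two graphs whose features match the inputs, with
# senders/receivers shifted back to per-graph node indexing.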
def pad_with_graphs(graph: gn_graph.GraphsTuple,
n_node: int,
n_edge: int,
n_graph: int = 2) -> gn_graph.GraphsTuple:
"""Pads a ``GraphsTuple`` to size by adding computation preserving graphs.
The ``GraphsTuple`` is padded by first adding a dummy graph which contains the
padding nodes and edges, and then empty graphs without nodes or edges.
The empty graphs and the dummy graph do not interfere with the graphnet
calculations on the original graph, and so are computation preserving.
The padding requires at least one spare node and one spare graph.
This function does not support jax.jit, because the shape of the output
is data-dependent.
Args:
graph: the ``GraphsTuple`` to be padded with a dummy graph and empty graphs.
n_node: the number of nodes in the padded ``GraphsTuple``.
n_edge: the number of edges in the padded ``GraphsTuple``.
n_graph: the number of graphs in the padded ``GraphsTuple``. Default is 2,
which is the lowest possible value, because we always have at least one
graph in the original ``GraphsTuple`` and we need one dummy graph for the
padding.
Raises:
ValueError: if the passed ``n_graph`` is smaller than 2.
RuntimeError: if the given ``GraphsTuple`` is too large for the given
padding.
Returns:
A padded ``GraphsTuple``.
"""
if n_graph < 2:
raise ValueError(
f'n_graph is {n_graph}, which is smaller than minimum value of 2.')
graph = jax.device_get(graph)
pad_n_node = int(n_node - np.sum(graph.n_node))
pad_n_edge = int(n_edge - np.sum(graph.n_edge))
pad_n_graph = int(n_graph - graph.n_node.shape[0])
if pad_n_node <= 0 or pad_n_edge < 0 or pad_n_graph <= 0:
raise RuntimeError(
'Given graph is too large for the given padding. difference: '
f'n_node {pad_n_node}, n_edge {pad_n_edge}, n_graph {pad_n_graph}')
pad_n_empty_graph = pad_n_graph - 1
tree_nodes_pad = (
lambda leaf: np.zeros((pad_n_node,) + leaf.shape[1:], dtype=leaf.dtype))
tree_edges_pad = (
lambda leaf: np.zeros((pad_n_edge,) + leaf.shape[1:], dtype=leaf.dtype))
tree_globs_pad = (
lambda leaf: np.zeros((pad_n_graph,) + leaf.shape[1:], dtype=leaf.dtype))
padding_graph = gn_graph.GraphsTuple(
n_node=np.concatenate(
[np.array([pad_n_node], dtype=np.int32),
np.zeros(pad_n_empty_graph, dtype=np.int32)]),
n_edge=np.concatenate(
[np.array([pad_n_edge], dtype=np.int32),
np.zeros(pad_n_empty_graph, dtype=np.int32)]),
nodes=tree.tree_map(tree_nodes_pad, graph.nodes),
edges=tree.tree_map(tree_edges_pad, graph.edges),
globals=tree.tree_map(tree_globs_pad, graph.globals),
senders=np.zeros(pad_n_edge, dtype=np.int32),
receivers=np.zeros(pad_n_edge, dtype=np.int32),
)
return _batch([graph, padding_graph], np_=np)
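# Illustrative shape sketch (not part of the original source): padding a graph
# with 3 nodes, 5 edges and 1 graph up to 10 nodes, 12 edges and 2 graphs.
#
#   padded = pad_with_graphs(graph, n_node=10, n_edge=12, n_graph=2)
#   padded.n_node  # -> [3, 7]  (7 padding nodes in the dummy graph)
#   padded.n_edge  # -> [5, 7]  (7 padding edges in the dummy graph)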
def get_number_of_padding_with_graphs_graphs(
padded_graph: gn_graph.GraphsTuple) -> int:
"""Returns number of padding graphs in padded_graph.
Warning: This method only gives results for graphs that have been padded with
``pad_with_graphs``.
Args:
padded_graph: a ``GraphsTuple`` that has been padded with
``pad_with_graphs``.
Returns:
The number of padding graphs.
"""
# The first padding graph always has at least one padding node, and
# all padding graphs that follow have 0 nodes. We can count how many
# trailing graphs have 0 nodes, and add one.
n_trailing_empty_padding_graphs = jnp.argmin(padded_graph.n_node[::-1] == 0)
return n_trailing_empty_padding_graphs + 1
def get_number_of_padding_with_graphs_nodes(
padded_graph: gn_graph.GraphsTuple) -> int:
"""Returns number of padding nodes in given padded_graph.
Warning: This method only gives results for graphs that have been padded with
``pad_with_graphs``.
Args:
padded_graph: a ``GraphsTuple`` that has been padded with
``pad_with_graphs``.
Returns:
The number of padding nodes.
"""
return padded_graph.n_node[
-get_number_of_padding_with_graphs_graphs(padded_graph)]
def get_number_of_padding_with_graphs_edges(
padded_graph: gn_graph.GraphsTuple) -> int:
"""Returns number of padding edges in given padded_graph.
Warning: This method only gives results for graphs that have been padded with
``pad_with_graphs``.
Args:
padded_graph: a ``GraphsTuple`` that has been padded with
``pad_with_graphs``.
Returns:
The number of padding edges.
"""
return padded_graph.n_edge[
-get_number_of_padding_with_graphs_graphs(padded_graph)]
def unpad_with_graphs(
padded_graph: gn_graph.GraphsTuple) -> gn_graph.GraphsTuple:
"""Unpads the given graph by removing the dummy graph and empty graphs.
This function assumes that the given graph was padded with the
``pad_with_graphs`` function.
This function does not support jax.jit, because the shape of the output
is data-dependent!
Args:
padded_graph: ``GraphsTuple`` padded with a dummy graph and empty graphs.
Returns:
The unpadded graph.
"""
n_padding_graph = get_number_of_padding_with_graphs_graphs(padded_graph)
n_padding_node = get_number_of_padding_with_graphs_nodes(padded_graph)
n_padding_edge = get_number_of_padding_with_graphs_edges(padded_graph)
def remove_edge_padding(edge_array):
if n_padding_edge == 0:
return edge_array
return edge_array[:-n_padding_edge]
unpadded_graph = gn_graph.GraphsTuple(
n_node=padded_graph.n_node[:-n_padding_graph],
n_edge=padded_graph.n_edge[:-n_padding_graph],
nodes=tree.tree_map(lambda x: x[:-n_padding_node], padded_graph.nodes),
edges=tree.tree_map(remove_edge_padding, padded_graph.edges),
globals=tree.tree_map(lambda x: x[:-n_padding_graph],
padded_graph.globals),
senders=remove_edge_padding(padded_graph.senders),
receivers=remove_edge_padding(padded_graph.receivers),
)
return unpadded_graph
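# Illustrative round-trip sketch (not part of the original source):
#
#   unpad_with_graphs(pad_with_graphs(graph, n_node=10, n_edge=12, n_graph=2))
#   # recovers a graph with the same contents as the original `graph`.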
def get_node_padding_mask(padded_graph: gn_graph.GraphsTuple) -> ArrayTree:
"""Returns a mask for the nodes of a padded graph.
Args:
padded_graph: ``GraphsTuple`` padded using ``pad_with_graphs``. This graph
must contain at least one array of node features so the total static
number of nodes can be inferred statically from the shape, and the method
can be jitted.
Returns:
Boolean array of shape [total_num_nodes] containing True for real nodes,
and False for padding nodes.
"""
n_padding_node = get_number_of_padding_with_graphs_nodes(padded_graph)
flat_node_features = tree.tree_leaves(padded_graph.nodes)
if not flat_node_features:
raise ValueError(
'`padded_graph` must have at least one array of node features')
total_num_nodes = flat_node_features[0].shape[0]
return _get_mask(padding_length=n_padding_node, full_length=total_num_nodes)
def get_edge_padding_mask(padded_graph: gn_graph.GraphsTuple) -> ArrayTree:
"""Returns a mask for the edges of a padded graph.
Args:
padded_graph: ``GraphsTuple`` padded using ``pad_with_graphs``.
Returns:
Boolean array of shape [total_num_edges] containing True for real edges,
and False for padding edges.
"""
n_padding_edge = get_number_of_padding_with_graphs_edges(padded_graph)
total_num_edges = padded_graph.senders.shape[0]
return _get_mask(padding_length=n_padding_edge, full_length=total_num_edges)
def get_graph_padding_mask(padded_graph: gn_graph.GraphsTuple) -> ArrayTree:
"""Returns a mask for the graphs of a padded graph.
Args:
padded_graph: ``GraphsTuple`` padded using ``pad_with_graphs``.
Returns:
Boolean array of shape [total_num_graphs] containing True for real graphs,
and False for padding graphs.
"""
n_padding_graph = get_number_of_padding_with_graphs_graphs(padded_graph)
total_num_graphs = padded_graph.n_node.shape[0]
return _get_mask(padding_length=n_padding_graph, full_length=total_num_graphs)
def _get_mask(padding_length, full_length):
valid_length = full_length - padding_length
return jnp.arange(full_length, dtype=jnp.int32) < valid_length
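# Illustrative mask sketch (not part of the original source), continuing the
# padding example above (3 real nodes padded to 10 in total, assuming the graph
# carries node features):
#
#   get_node_padding_mask(padded)
#   # -> [True, True, True, False, False, False, False, False, False, False]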
def concatenated_args(
update: Optional[Callable[..., ArrayTree]] = None,
*,
axis: int = -1
) -> Union[Callable[..., ArrayTree], Callable[[Callable[..., ArrayTree]],
ArrayTree]]:
"""Decorator that concatenates arguments before being passed to an update_fn.
By default node, edge and global features are passed separately to update
functions. However, it is common practice to concatenate these features before
passing them to a neural network. This wrapper concatenates the arguments
for you.
For example::
# Without the wrapper
def update_node_fn(nodes, receivers, globals):
return net(jnp.concatenate([nodes, receivers, globals], axis=1))
# With the wrapper
@concatenated_args
def update_node_fn(features):
return net(features)
Args:
update: an update function that takes ``jnp.ndarray``.
axis: the axis upon which to concatenate.
Returns:
A wrapped function with the arguments concatenated.
"""
def _decorate(f):
@functools.wraps(update)
def wrapper(*args, **kwargs):
combined_args = tree.tree_flatten(args)[0] + tree.tree_flatten(kwargs)[0]
concat_args = jnp.concatenate(combined_args, axis=axis)
return f(concat_args)
return wrapper
# If the update function is passed, then decorate the update function.
if update:
return _decorate(update)
# Otherwise, return the decorator.
return _decorate
def dtype_max_value(dtype):
if dtype.kind == 'f':
return jnp.inf
elif dtype.kind == 'i':
return jnp.iinfo(dtype).max
elif dtype.kind == 'b':
return True
else:
raise ValueError(f'Invalid data type {dtype.kind!r}.')
def dtype_min_value(dtype):
if dtype.kind == 'f':
return -jnp.inf
elif dtype.kind == 'i':
return jnp.iinfo(dtype).min
elif dtype.kind == 'b':
return False
else:
raise ValueError(f'Invalid data type {dtype.kind!r}.')
def get_fully_connected_graph(n_node_per_graph: int,
n_graph: int,
node_features: Optional[ArrayTree] = None,
global_features: Optional[ArrayTree] = None,
add_self_edges: bool = True):
"""Gets a fully connected graph given n_node_per_graph and n_graph.
This method is jittable.
Args:
n_node_per_graph: The number of nodes in each graph.
n_graph: The number of graphs in the `jraph.GraphsTuple`.
node_features: Optional node features.
global_features: Optional global features.
add_self_edges: Whether to add self edges to the graph.
Returns:
`jraph.GraphsTuple`
"""
if node_features is not None:
num_nodes = jax.tree_leaves(node_features)[0].shape[0]
if n_node_per_graph * n_graph != num_nodes:
raise ValueError(
'Number of nodes is not equal to n_node_per_graph * n_graph.')
if global_features is not None:
if n_graph != jax.tree_leaves(global_features)[0].shape[0]:
raise ValueError('The number of globals is not equal to n_graph.')
senders = []
receivers = []
n_edge = []
tmp_senders, tmp_receivers = jnp.meshgrid(
jnp.arange(n_node_per_graph), jnp.arange(n_node_per_graph))
if not add_self_edges:
tmp_senders = jax.vmap(jnp.roll)(tmp_senders,
-jnp.arange(len(tmp_senders)))[:, 1:]
tmp_receivers = tmp_receivers[:, 1:]
# Flatten the senders and receivers.
tmp_senders = tmp_senders.flatten()
tmp_receivers = tmp_receivers.flatten()
for graph_idx in range(n_graph):
offset = graph_idx * n_node_per_graph
senders.append(tmp_senders + offset)
receivers.append(tmp_receivers + offset)
n_edge.append(len(tmp_senders))
def _concat_or_empty_indices(indices_list):
if indices_list:
return jnp.concatenate(indices_list, axis=0)
else:
return jnp.array([], dtype=tmp_senders.dtype)
return gn_graph.GraphsTuple(
nodes=node_features,
edges=None,
n_node=jnp.array([n_node_per_graph] * n_graph),
n_edge=jnp.array(n_edge) if n_edge else jnp.array([0]),
senders=_concat_or_empty_indices(senders),
receivers=_concat_or_empty_indices(receivers),
globals=global_features,
)
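# Illustrative usage sketch (not part of the original source):
#
#   g = get_fully_connected_graph(n_node_per_graph=3, n_graph=2,
#                                 add_self_edges=False)
#   g.n_node         # -> [3, 3]
#   g.n_edge         # -> [6, 6]  (3 * 2 directed edges per graph)
#   g.senders.shape  # -> (12,)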
_NUMBER_FIELDS = ('n_node', 'n_edge', 'n_graph')
def _get_graph_size(graphs_tuple):
n_node = np.sum(graphs_tuple.n_node)
n_edge = len(graphs_tuple.senders)
n_graph = len(graphs_tuple.n_node)
return n_node, n_edge, n_graph
def _is_over_batch_size(graph, graph_batch_size):
graph_size = _get_graph_size(graph)
return any([x > y for x, y in zip(graph_size, graph_batch_size)])
def dynamically_batch(
graphs_tuple_iterator: Iterator[gn_graph.GraphsTuple], n_node: int,
n_edge: int, n_graph: int) -> Generator[gn_graph.GraphsTuple, None, None]:
"""Dynamically batches trees with `jraph.GraphsTuples` up to specified sizes.
Elements of the `graphs_tuple_iterator` will be incrementally added to a batch
until the limits defined by `n_node`, `n_edge` and `n_graph` are reached. This
means each element yielded by this generator may have a differing number of
graphs in its batch.
Args:
graphs_tuple_iterator: An iterator of `jraph.GraphsTuples`.
n_node: The maximum number of nodes in a batch; must be at least the number
of nodes in the largest input graph plus one (one node is reserved for the
padding graph).
n_edge: The maximum number of edges in a batch; must be at least the number
of edges in the largest input graph.
n_graph: The maximum number of graphs in a batch; must be at least 2.
Yields:
A `jraph.GraphsTuple` batch of graphs.
Raises:
ValueError: if the number of graphs is < 2.
RuntimeError: if the `graphs_tuple_iterator` contains elements which are not
`jraph.GraphsTuple`s.
RuntimeError: if a graph is found which is larger than the batch size.
"""
if n_graph < 2:
raise ValueError('The number of graphs in a batch size must be greater or '
f'equal to `2` for padding with graphs, got {n_graph}.')
valid_batch_size = (n_node - 1, n_edge, n_graph - 1)
accumulated_graphs = []
num_accumulated_nodes = 0
num_accumulated_edges = 0
num_accumulated_graphs = 0
for element in graphs_tuple_iterator:
element_nodes, element_edges, element_graphs = _get_graph_size(element)
if _is_over_batch_size(element, valid_batch_size):
# First yield the batched graph so far if exists.
if accumulated_graphs:
batched_graph = batch_np(accumulated_graphs)
yield pad_with_graphs(batched_graph, n_node, n_edge, n_graph)
# Then report the error.
graph_size = element_nodes, element_edges, element_graphs
graph_size = {k: v for k, v in zip(_NUMBER_FIELDS, graph_size)}
batch_size = {k: v for k, v in zip(_NUMBER_FIELDS, valid_batch_size)}
raise RuntimeError('Found graph bigger than batch size. Valid Batch '
f'Size: {batch_size}, Graph Size: {graph_size}')
# If this is the first element of the batch, set it and continue.
# Otherwise check if there is space for the graph in the batch:
# if there is, add it to the batch
# if there isn't, return the old batch and start a new batch.
if not accumulated_graphs:
accumulated_graphs = [element]
num_accumulated_nodes = element_nodes
num_accumulated_edges = element_edges
num_accumulated_graphs = element_graphs
continue
else:
if ((num_accumulated_graphs + element_graphs > n_graph - 1) or
(num_accumulated_nodes + element_nodes > n_node - 1) or
(num_accumulated_edges + element_edges > n_edge)):
batched_graph = batch_np(accumulated_graphs)
yield pad_with_graphs(batched_graph, n_node, n_edge, n_graph)
accumulated_graphs = [element]
num_accumulated_nodes = element_nodes
num_accumulated_edges = element_edges
num_accumulated_graphs = element_graphs
else:
accumulated_graphs.append(element)
num_accumulated_nodes += element_nodes
num_accumulated_edges += element_edges
num_accumulated_graphs += element_graphs
# We may still have data in batched graph.
if accumulated_graphs:
batched_graph = batch_np(accumulated_graphs)
yield pad_with_graphs(batched_graph, n_node, n_edge, n_graph)
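# Illustrative usage sketch (not part of the original source). `graph_list`,
# `loss_fn` and `params` are hypothetical names:
#
#   jitted_loss = jax.jit(loss_fn)
#   for padded_batch in dynamically_batch(
#       iter(graph_list), n_node=128, n_edge=1024, n_graph=32):
#     # Every yielded batch is padded to exactly 128 nodes, 1024 edges and
#     # 32 graphs, so the jit-compiled loss sees static shapes.
#     loss = jitted_loss(params, padded_batch)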
def _expand_trailing_dimensions(array: jnp.ndarray,
template: jnp.ndarray) -> jnp.ndarray:
missing_dims = len(template.shape) - len(array.shape)
out = jnp.reshape(array, array.shape + (1,) * missing_dims)
assert out.dtype == array.dtype
return out
def _get_zero_fn(mask: jnp.ndarray) -> Callable[[jnp.ndarray], jnp.ndarray]:
return lambda x: _expand_trailing_dimensions(mask, x) * x
def zero_out_padding(graph: gn_graph.GraphsTuple) -> gn_graph.GraphsTuple:
"""Zeroes out padded graphs values.
Padded graphs can cause numeric overflow issues when a node has a large number
connecting edges, and the graph network has many layers. By inserting this
zeroing function between layers of the graph network these issues may be
addressed.
Zeroing is performed by multiplying a boolean mask with the corresponding
array of the input dtype, assuming correct type casting happens.
For example::
# Set up padded jraph.GraphsTuple & simple jraph.GraphNetwork.
key = jax.random.PRNGKey(0)
graph = jraph.GraphsTuple(nodes=jax.random.randint(key, (3,), 0, 1,
dtype=jnp.int32),
edges=jax.random.randint(key, (5,), 0, 1,
dtype=jnp.int32),
senders=jnp.array([0,0,1,1,2]),
receivers=jnp.array([1,2,0,2,1]),
n_node=jnp.array([3]),
n_edge=jnp.array([5]),
globals=jax.random.randint(key, (1,), 0, 128,
dtype=jnp.int32))
padded_graph = jraph.pad_with_graphs(graph, n_node=4, n_edge=100, n_graph=2)
def net_fn(graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
embedder = jraph.GraphMapFeatures(
hk.Embed(2, 1), hk.Embed(2, 1), hk.Embed(2, 1))
update_fn = jraph.concatenated_args(lambda x: jnp.sum(x, -1,
keepdims=True))
net = jraph.GraphNetwork(
update_node_fn=update_fn,
update_edge_fn=update_fn,
update_global_fn=update_fn)
embedded = embedder(graph)
return net(embedded)
net = hk.without_apply_rng(hk.transform(net_fn))
params = net.init(key, padded_graph)
# Without zeroing.
net.apply(params, padded_graph).nodes
>> [[ -1.1949954]
[ -1.4456191]
[ -1.1949954]
[-88.505775 ]] # Very large activation in a single node.
# With zeroing.
zero_out_padding(net.apply(params, padded_graph)).nodes
>> [[ -1.1949954]
[ -1.4456191]
[ -1.1949954]
[-0. ]] # Zeroed out activation.
Args:
graph: A padded graph.
Returns:
A graph with the same valid values as input, but padded values zeroed out.
"""
edge_mask = get_edge_padding_mask(graph)
masked_edges = jax.tree_map(_get_zero_fn(edge_mask), graph.edges)
node_mask = get_node_padding_mask(graph)
masked_nodes = jax.tree_map(_get_zero_fn(node_mask), graph.nodes)
global_mask = get_graph_padding_mask(graph)
masked_globals = jax.tree_map(_get_zero_fn(global_mask), graph.globals)
return graph._replace(
nodes=masked_nodes, edges=masked_edges, globals=masked_globals)
def with_zero_out_padding_outputs(
graph_net: Callable[[gn_graph.GraphsTuple], gn_graph.GraphsTuple]
) -> Callable[[gn_graph.GraphsTuple], gn_graph.GraphsTuple]:
"""A wrapper for graph to graph functions that zeroes padded d output values.
See `zero_out_padding` for a full explanation of the method.
Args:
graph_net: A Graph Neural Network.
Returns:
A Graph Neural Network that will zero out all output padded values.
"""
@functools.wraps(graph_net)
def wrapper(graph: gn_graph.GraphsTuple) -> gn_graph.GraphsTuple:
return zero_out_padding(graph_net(graph))
return wrapper
def sparse_matrix_to_graphs_tuple(
senders: jnp.ndarray, receivers: jnp.ndarray,
values: jnp.ndarray, n_node: jnp.ndarray) -> gn_graph.GraphsTuple:
"""Creates a `jraph.GraphsTuple` from a sparse matrix in COO format.
Args:
senders: The row indices of the matrix.
receivers: The column indices of the matrix.
values: The values of the matrix, interpreted as non-negative integer edge
multiplicities (an entry of ``k`` creates ``k`` parallel edges).
n_node: The number of nodes in the graph defined by the sparse matrix.
Returns:
A `jraph.GraphsTuple` graph based on the sparse matrix.
"""
# Handle graphs with no edges, where np.repeat would otherwise raise an error.
values = np.array([0]) if values.size == 0 else values
n_edge = np.array([np.sum(values)])
senders = np.repeat(senders, values)
receivers = np.repeat(receivers, values)
return gn_graph.GraphsTuple(
nodes=None,
edges=None,
receivers=receivers,
senders=senders,
globals=None,
n_node=n_node,
n_edge=n_edge)
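# Illustrative usage sketch (not part of the original source): integer values
# are treated as edge multiplicities.
#
#   g = sparse_matrix_to_graphs_tuple(
#       senders=np.array([0, 1, 2]),
#       receivers=np.array([1, 2, 0]),
#       values=np.array([1, 2, 1]),
#       n_node=np.array([3]))
#   g.senders    # -> [0, 1, 1, 2]
#   g.receivers  # -> [1, 2, 2, 0]
#   g.n_edge     # -> [4]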
| jraph-master | jraph/_src/utils.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Electronic Voting Example.
In this example we use DeepSets to estimate the winner of an election.
Each vote is represented by a one-hot encoded vector.
It goes without saying, but don't use this in a real election!
Seriously, don't!
"""
import collections
import logging
import random
from absl import app
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
import optax
Problem = collections.namedtuple("Problem", ("graph", "labels"))
def get_voting_problem(min_n_voters: int, max_n_voters: int) -> Problem:
"""Creates set of one-hot vectors representing a randomly generated election.
Args:
min_n_voters: minimum number of voters in the election.
max_n_voters: maximum number of voters in the election.
Returns:
A ``Problem`` with the padded graph encoding the set of votes and the
one-hot vector encoding the winner.
"""
n_candidates = 20
n_voters = random.randint(min_n_voters, max_n_voters)
votes = np.random.randint(0, n_candidates, size=(n_voters,))
one_hot_votes = np.eye(n_candidates)[votes]
winner = np.argmax(np.sum(one_hot_votes, axis=0))
one_hot_winner = np.eye(n_candidates)[winner]
graph = jraph.GraphsTuple(
n_node=np.asarray([n_voters]),
n_edge=np.asarray([0]),
nodes=one_hot_votes,
edges=None,
globals=np.zeros((1, n_candidates)),
# There are no edges in our graph.
senders=np.array([], dtype=np.int32),
receivers=np.array([], dtype=np.int32))
# In order to jit compile our code, we have to pad the nodes and edges of
# the GraphsTuple to a static shape.
graph = jraph.pad_with_graphs(graph, max_n_voters+1, 0)
return Problem(graph=graph, labels=one_hot_winner)
def network_definition(
graph: jraph.GraphsTuple,
num_message_passing_steps: int = 1) -> jraph.ArrayTree:
"""Defines a graph neural network.
Args:
graph: GraphsTuple the network processes.
num_message_passing_steps: number of message passing steps.
Returns:
The updated global features (per-candidate logits).
"""
@jax.vmap
def update_fn(*args):
size = args[0].shape[-1]
return hk.nets.MLP([size, size])(jnp.concatenate(args, axis=-1))
for _ in range(num_message_passing_steps):
gn = jraph.DeepSets(
update_node_fn=update_fn,
update_global_fn=update_fn,
aggregate_nodes_for_globals_fn=jraph.segment_mean,
)
graph = gn(graph)
return hk.Linear(graph.globals.shape[-1])(graph.globals)
def train(num_steps: int):
"""Trains a graph neural network on an electronic voting problem."""
train_dataset = (2, 15)
test_dataset = (16, 20)
random.seed(42)
network = hk.without_apply_rng(hk.transform(network_definition))
problem = get_voting_problem(*train_dataset)
params = network.init(jax.random.PRNGKey(42), problem.graph)
@jax.jit
def prediction_loss(params, problem):
globals_ = network.apply(params, problem.graph)
# We interpret the globals as logits for the winner.
# Only the first graph is real, the second graph is for padding.
log_prob = jax.nn.log_softmax(globals_[0]) * problem.labels
return -jnp.sum(log_prob)
@jax.jit
def accuracy_loss(params, problem):
globals_ = network.apply(params, problem.graph)
# We interpret the globals as logits for the winner.
# Only the first graph is real, the second graph is for padding.
equal = jnp.argmax(globals_[0]) == jnp.argmax(problem.labels)
return equal.astype(np.int32)
opt_init, opt_update = optax.adam(2e-4)
opt_state = opt_init(params)
@jax.jit
def update(params, opt_state, problem):
g = jax.grad(prediction_loss)(params, problem)
updates, opt_state = opt_update(g, opt_state)
return optax.apply_updates(params, updates), opt_state
for step in range(num_steps):
problem = get_voting_problem(*train_dataset)
params, opt_state = update(params, opt_state, problem)
if step % 1000 == 0:
train_loss = jnp.mean(
jnp.asarray([
accuracy_loss(params, get_voting_problem(*train_dataset))
for _ in range(100)
])).item()
test_loss = jnp.mean(
jnp.asarray([
accuracy_loss(params, get_voting_problem(*test_dataset))
for _ in range(100)
])).item()
logging.info("step %r loss train %r test %r", step, train_loss, test_loss)
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
train(num_steps=100000)
if __name__ == "__main__":
app.run(main)
| jraph-master | jraph/examples/e_voting.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of a Hamiltonian Graph Network (https://arxiv.org/abs/1909.12790).
In this example:
* The `GraphNetwork`s implements the hardcoded formulas of the Hooke's
Hamiltonian to return the scalar hamiltonian of a system
(see `hookes_hamiltonian_from_graph_fn`).
* Then JAX autodiff is used to obtain the function of derivatives of the state
via Hamilton equations(see `get_state_derivatives_from_hamiltonian_fn`).
* The function of the derivatives of the state is used by a generic integrator
to simulate steps forward in time (see `single_integration_step`).
* `build_hookes_particle_state_graph` is used to sample the initial state
of the system.
Note this example does not implement a learned Hamiltonian Graph Network, but
a hardcoded Hamiltonian function corresponding to the Hooke's potential,
implemented as a `GraphNetwork`. This is to show how natural it is to express the
true Hamiltonian of a particle system as a Graph Network. However, the only
system-specific code here is the hard coded Hamiltonian in
`hookes_hamiltonian_from_graph_fn` and the data generation provided in
`build_hookes_particle_state_graph`. Everything is reusable for any other
system.
To implement a learned Hamiltonian Graph Network, one could closely follow
`hookes_hamiltonian_from_graph_fn` using function approximators (e.g. MLP) in
`edge_update_fn`, `node_update_fn` and `global_update_fn` that take as inputs
the concatenated features (e.g. using `jraph.concatenated_args`), with the only
condition that the `global_update_fn` has an output size of 1, to match
the expected output size for a Hamiltonian. Then the learned Hamiltonian Graph
Network would be trained by simply adding a loss term in the position / momentum
of the next step after applying the integrator.
Note it is recommended to use immutable container types to store nested edge,
node and global features to avoid unwanted side effects. In this example we
use `frozendict`s, which we register with `jax.tree_util`.
"""
import functools
from typing import Tuple, Callable
from absl import app
from frozendict import frozendict
import jax
import jax.numpy as jnp
import jraph
import matplotlib.pyplot as plt
import numpy as np
# Tell tree_util how to navigate frozendicts.
jax.tree_util.register_pytree_node(
frozendict,
flatten_func=lambda s: (tuple(s.values()), tuple(s.keys())),
unflatten_func=lambda k, xs: frozendict(zip(k, xs)))
def hookes_hamiltonian_from_graph_fn(
graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Computes Hamiltonian of a Hooke's potential system represented in a graph.
While this function hardcodes the Hamiltonian for a Hooke's potential, a
learned Hamiltonian Graph Network (https://arxiv.org/abs/1909.12790) could
be implemented by replacing the hardcoded formulas by learnable MLPs that
take as inputs all of the concatenated features to the edge_fn, node_fn,
and global_fn, and outputs a single scalar value in the global_fn.
Args:
graph: `GraphsTuple` where the nodes contain:
- "mass": [num_particles]
- "position": [num_particles, num_dims]
- "momentum": [num_particles, num_dims]
and the edges contain:
- "spring_constant": [num_interations]
Returns:
`GraphsTuple` with features:
- edge features: "hookes_potential" [num_interactions]
- node features: "kinetic_energy" [num_particles]
- global features: "hamiltonian" [batch_size]
"""
def update_edge_fn(edges, senders, receivers, globals_):
del globals_
distance = jnp.linalg.norm(senders["position"] - receivers["position"])
hookes_potential_per_edge = 0.5 * edges["spring_constant"] * distance ** 2
return frozendict({"hookes_potential": hookes_potential_per_edge})
def update_node_fn(nodes, sent_edges, received_edges, globals_):
del sent_edges, received_edges, globals_
momentum_norm = jnp.linalg.norm(nodes["momentum"])
kinetic_energy_per_node = momentum_norm ** 2 / (2 * nodes["mass"])
return frozendict({"kinetic_energy": kinetic_energy_per_node})
def update_global_fn(nodes, edges, globals_):
del globals_
# At this point we will receive node and edge features aggregated (summed)
# for all nodes and edges in each graph.
hamiltonian_per_graph = nodes["kinetic_energy"] + edges["hookes_potential"]
return frozendict({"hamiltonian": hamiltonian_per_graph})
gn = jraph.GraphNetwork(
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn,
update_global_fn=update_global_fn)
return gn(graph)
# Methods for generating the data.
def build_hookes_particle_state_graph(num_particles: int) -> jraph.GraphsTuple:
"""Generates a graph representing a Hooke's system in a random state."""
mass = np.random.uniform(0, 5, [num_particles])
velocity = get_random_uniform_norm2d_vectors(0, 0.1, num_particles)
position = get_random_uniform_norm2d_vectors(0, 1, num_particles)
momentum = velocity * np.expand_dims(mass, axis=-1)
# Remove average momentum, so center of mass does not move.
momentum = momentum - momentum.mean(0, keepdims=True)
# Connect all particles to all particles.
particle_indices = np.arange(num_particles)
senders, receivers = np.meshgrid(particle_indices, particle_indices)
senders, receivers = senders.flatten(), receivers.flatten()
# Generate a symmetric random matrix of spring constants.
# Generate random elements strictly in the lower triangular part.
spring_constants = np.random.uniform(
1e-2, 1e-1, [num_particles, num_particles])
spring_constants = np.tril(
spring_constants) + np.tril(spring_constants, -1).T
spring_constants = spring_constants.flatten()
# Remove interactions of particles to themselves.
mask = senders != receivers
senders, receivers = senders[mask], receivers[mask]
spring_constants = spring_constants[mask]
num_interactions = receivers.shape[0]
return jraph.GraphsTuple(
n_node=np.asarray([num_particles]),
n_edge=np.asarray([num_interactions]),
nodes={
"mass": mass, # Scalar mass for each particle.
"position": position, # 2d position for each particle.
"momentum": momentum, # 2d momentum for each particle.
},
edges={
# Scalar spring constant for each interaction
"spring_constant": spring_constants,
},
globals={},
senders=senders,
receivers=receivers)
def get_random_uniform_norm2d_vectors(
min_norm: float, max_norm: float, num_particles: int) -> np.ndarray:
"""Returns 2-d vectors with random norms."""
norm = np.random.uniform(min_norm, max_norm, [num_particles, 1])
angle = np.random.uniform(0, 2*np.pi, [num_particles])
return norm * np.stack([np.cos(angle), np.sin(angle)], axis=-1)
def get_fully_connected_senders_and_receivers(
num_particles: int, self_edges: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
"""Returns senders and receivers for fully connected particles."""
particle_indices = np.arange(num_particles)
senders, receivers = np.meshgrid(particle_indices, particle_indices)
senders, receivers = senders.flatten(), receivers.flatten()
if not self_edges:
mask = senders != receivers
senders, receivers = senders[mask], receivers[mask]
return senders, receivers
# All code below here is general purpose for any system or integrator.
# Utility methods for getting/setting the state of the particles in the graph
# (position and momentum), and for obtaining the static part of the graph
# (connectivity and particle parameters: masses, spring constants).
def set_system_state(
static_graph: jraph.GraphsTuple,
position: np.ndarray,
momentum: np.ndarray) -> jraph.GraphsTuple:
"""Sets the non-static parameters of the graph (momentum, position)."""
nodes = static_graph.nodes.copy(position=position, momentum=momentum)
return static_graph._replace(nodes=nodes)
def get_system_state(graph: jraph.GraphsTuple) -> Tuple[np.ndarray, np.ndarray]:
return graph.nodes["position"], graph.nodes["momentum"]
def get_static_graph(graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Returns the graph with the static parts of a system only."""
nodes = dict(graph.nodes)
del nodes["position"], nodes["momentum"]
return graph._replace(nodes=frozendict(nodes))
# Utility methods to operate with Hamiltonian functions.
def get_hamiltonian_from_state_fn(
static_graph: jraph.GraphsTuple,
hamiltonian_from_graph_fn: Callable[[jraph.GraphsTuple], jraph.GraphsTuple],
) -> Callable[[np.ndarray, np.ndarray], float]:
"""Returns fn such that fn(position, momentum) -> scalar Hamiltonian.
Args:
static_graph: `GraphsTuple` containing per-particle static parameters and
connectivity, such that a full graph of the state can be built by calling
`set_system_state(static_graph, position, momentum)`.
hamiltonian_from_graph_fn: callable that given an input `GraphsTuple`
returns a `GraphsTuple` with a "hamiltonian" field in the globals.
Returns:
Function that given a state (position, momentum) returns the scalar
Hamiltonian.
"""
def hamiltonian_from_state_fn(position, momentum):
# Note we sum along the batch dimension to get the total energy in the batch,
# so we can easily take the gradient.
graph = set_system_state(static_graph, position, momentum)
output_graph = hamiltonian_from_graph_fn(graph)
return output_graph.globals["hamiltonian"].sum()
return hamiltonian_from_state_fn
def get_state_derivatives_from_hamiltonian_fn(
hamiltonian_from_state_fn: Callable[[np.ndarray, np.ndarray], float],
) -> Callable[[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
"""Returns fn(position, momentum, ...) -> (dposition_dt, dmomentum_dt).
Args:
hamiltonian_from_state_fn: Function that given a state
(position, momentum) returns the scalar Hamiltonian.
Returns:
Function that given a state (position, momentum) returns the time
derivatives of the state (dposition_dt, dmomentum_dt) by applying
Hamilton equations.
"""
hamiltonian_gradients_fn = jax.grad(hamiltonian_from_state_fn, argnums=[0, 1])
def state_derivatives_from_hamiltonian_fn(
position: np.ndarray, momentum: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
# Take the derivatives against position and momentum.
dh_dposition, dh_dmomentum = hamiltonian_gradients_fn(position, momentum)
# Hamilton equations.
dposition_dt = dh_dmomentum
dmomentum_dt = - dh_dposition
return dposition_dt, dmomentum_dt
return state_derivatives_from_hamiltonian_fn
# Implementations of some general purpose integrators for Hamiltonian states.
StateDerivativesFnType = Callable[
[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]
def abstract_integrator(
position: np.ndarray, momentum: np.ndarray, time_step: float,
state_derivatives_fn: StateDerivativesFnType,
) -> Tuple[np.ndarray, np.ndarray]:
"""Signature of an abstract integrator.
An integrator is a function that, given the current state, a time step,
and a `state_derivatives_fn`, returns the next state.
Args:
position: array with the position at time t.
momentum: array with the momentum at time t.
time_step: integration step size.
state_derivatives_fn: a function fn that returns the time derivatives of a
state, such that fn(position, momentum) -> (dposition_dt, dmomentum_dt),
where dposition_dt and dmomentum_dt have the same shapes as
position and momentum.
Returns:
Tuple with position and momentum at time `t + time_step`.
"""
raise NotImplementedError("Abstract integrator")
def euler_integrator(
position: np.ndarray, momentum: np.ndarray, time_step: float,
state_derivatives_fn: StateDerivativesFnType,
) -> Tuple[np.ndarray, np.ndarray]:
"""Implementation of an Euler integrator (see `abstract_integrator`)."""
dposition_dt, dmomentum_dt = state_derivatives_fn(position, momentum)
next_position = position + dposition_dt * time_step
next_momentum = momentum + dmomentum_dt * time_step
return next_position, next_momentum
def verlet_integrator(
position: np.ndarray, momentum: np.ndarray, time_step: float,
state_derivatives_fn: StateDerivativesFnType,
) -> Tuple[np.ndarray, np.ndarray]:
"""Implementation of Verlet integrator (see `abstract_integrator`)."""
_, dmomentum_dt = state_derivatives_fn(position, momentum)
aux_momentum = momentum + dmomentum_dt * time_step / 2
dposition_dt, _ = state_derivatives_fn(position, aux_momentum)
next_position = position + dposition_dt * time_step
_, dmomentum_dt = state_derivatives_fn(next_position, aux_momentum)
next_momentum = aux_momentum + dmomentum_dt * time_step / 2
return next_position, next_momentum
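# A minimal usage sketch (not part of the original pipeline; the helper name
# below is made up for illustration): the integrators above only need a
# `state_derivatives_fn`, so they can be sanity-checked on a 1-d harmonic
# oscillator with unit mass and unit spring constant, where
# H(q, p) = p**2 / 2 + q**2 / 2 and hence dq/dt = p, dp/dt = -q.
def _example_harmonic_oscillator_step(
    time_step: float = 0.01) -> Tuple[np.ndarray, np.ndarray]:
  def state_derivatives_fn(position, momentum):
    return momentum, -position
  position, momentum = np.ones([1]), np.zeros([1])
  # Both integrators share the same signature, so they are interchangeable.
  position, momentum = euler_integrator(
      position, momentum, time_step, state_derivatives_fn)
  position, momentum = verlet_integrator(
      position, momentum, time_step, state_derivatives_fn)
  return position, momentum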
# Single graph -> graph integration step.
IntegratorType = Callable[
[np.ndarray, np.ndarray, float, StateDerivativesFnType],
Tuple[np.ndarray, np.ndarray]
]
def single_integration_step(
graph: jraph.GraphsTuple, time_step: float,
integrator_fn: IntegratorType,
hamiltonian_from_graph_fn: Callable[[jraph.GraphsTuple], jraph.GraphsTuple],
) -> Tuple[float, jraph.GraphsTuple]:
"""Updates a graph state integrating by a single step.
Args:
graph: `GraphsTuple` representing a system state at time t.
time_step: size of the timestep to integrate for.
integrator_fn: Integrator to use. A function fn such that
fn(position_t, momentum_t, time_step, state_derivatives_fn) ->
(position_tp1, momentum_tp1)
hamiltonian_from_graph_fn: Function that given a `GraphsTuple`, returns
another one with a "hamiltonian" global field.
Returns:
`GraphsTuple` representing a system state at time `t + time_step`.
"""
# Template graph with particle/interaction parameters and connectivity
# but without the state (position/momentum).
static_graph = get_static_graph(graph)
# Get the Hamiltonian function, and the function that returns the state
# derivatives.
hamiltonian_fn = get_hamiltonian_from_state_fn(
static_graph=static_graph,
hamiltonian_from_graph_fn=hamiltonian_from_graph_fn)
state_derivatives_fn = get_state_derivatives_from_hamiltonian_fn(
hamiltonian_fn)
# Get the current state.
position, momentum = get_system_state(graph)
# Calling the integrator to get the next state.
next_position, next_momentum = integrator_fn(
position, momentum, time_step, state_derivatives_fn)
next_graph = set_system_state(static_graph, next_position, next_momentum)
# Return the energy of the next state too for plotting.
energy = hamiltonian_fn(next_position, next_momentum)
return energy, next_graph
def main(_):
# Get a step function and jit it.
# We could switch to any other Hamiltonian and any other integrator here.
# e.g. the non-symplectic `euler_integrator`.
step_fn = functools.partial(
single_integration_step,
hamiltonian_from_graph_fn=hookes_hamiltonian_from_graph_fn,
integrator_fn=verlet_integrator)
step_fn = jax.jit(step_fn)
# Get a graph with the initial state.
num_particles = 10
graph = build_hookes_particle_state_graph(num_particles)
# Iterate for multiple timesteps.
num_steps = 200
time_step = 0.002
positions_sequence = []
total_energies = []
steps = []
for step_i in range(num_steps):
energy, graph = step_fn(graph, time_step)
total_energies.append(energy)
positions_sequence.append(graph.nodes["position"])
steps.append(step_i + 1)
# Plot results (positions and energy as a function of time).
unused_fig, axes = plt.subplots(1, 2, figsize=(15, 5))
positions_sequence_array = np.stack(positions_sequence, axis=0)
axes[0].plot(positions_sequence_array[..., 0],
positions_sequence_array[..., -1])
axes[0].set_xlabel("Particle position x")
axes[0].set_ylabel("Particle position y")
axes[1].plot(steps, total_energies)
axes[1].set_ylim(0, max(total_energies)*1.2)
axes[1].set_xlabel("Simulation step")
axes[1].set_ylabel("Total energy")
plt.show()
if __name__ == "__main__":
app.run(main)
| jraph-master | jraph/examples/hamiltonian_graph_network.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Higgs Boson Detection Example.
One of the decay channels of the Higgs Boson is Higgs to two photons.
The two photons must have a combined invariant mass of 125 GeV.
In this example we use a relation network to detect whether a Higgs Boson
is present in a set of photons.
There are two situations:
a) Higgs: Two photons with an invariant mass of 125 GeV + an arbitrary number of
uncorrelated photons.
b) No Higgs: Just an arbitrary number of uncorrelated photons.
"""
import collections
import logging
import random
from absl import app
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
import optax
import scipy.stats
Problem = collections.namedtuple("Problem", ("graph", "labels"))
def get_random_rotation_matrix():
rotation = np.eye(4)
rotation[1:, 1:] = scipy.stats.ortho_group.rvs(3)
return rotation
def get_random_boost_matrix():
eta = np.random.uniform(-1, 1)
boost = np.eye(4)
boost[:2, :2] = np.array([[np.cosh(eta), -np.sinh(eta)],
[-np.sinh(eta), np.cosh(eta)]])
rotation = get_random_rotation_matrix()
return rotation.T @ boost @ rotation
def get_random_higgs_photons():
higgs = 125.18
boost = get_random_boost_matrix()
rotation = get_random_rotation_matrix()
photon1 = boost @ rotation @ np.array([higgs / 2, higgs / 2, 0, 0])
photon2 = boost @ rotation @ np.array([higgs / 2, -higgs / 2, 0, 0])
return photon1, photon2
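# A small sanity-check sketch (not part of the original example; the helper is
# hypothetical): the two photons returned above should combine to the Higgs
# invariant mass, since boosts and rotations preserve m^2 = E^2 - |p|^2 of the
# summed four-momentum [E, px, py, pz].
def _invariant_mass(photon1: np.ndarray, photon2: np.ndarray) -> float:
  total = photon1 + photon2
  return float(np.sqrt(total[0]**2 - np.sum(total[1:]**2)))
# E.g. `_invariant_mass(*get_random_higgs_photons())` should be ~125.18.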
def get_random_background_photon():
boost = get_random_boost_matrix()
rotation = get_random_rotation_matrix()
energy = np.random.uniform(20, 120)
return boost @ rotation @ np.array([energy, energy, 0, 0])
def get_higgs_problem(min_n_photons: int, max_n_photons: int) -> Problem:
"""Creates fully connected graph containing the detected photons.
Args:
min_n_photons: minimum number of photons in the detector.
max_n_photons: maximum number of photons in the detector.
Returns:
graph, one-hot label whether a higgs was present or not.
"""
assert min_n_photons >= 2, "Number of photons must be at least 2."
n_photons = random.randint(min_n_photons, max_n_photons)
photons = np.stack([get_random_background_photon() for _ in range(n_photons)])
# Add a higgs
if random.random() > 0.5:
label = np.eye(2)[0]
photons[:2] = np.stack(get_random_higgs_photons())
else:
label = np.eye(2)[1]
# The graph is fully connected.
senders = np.repeat(np.arange(n_photons), n_photons)
receivers = np.tile(np.arange(n_photons), n_photons)
graph = jraph.GraphsTuple(
n_node=np.asarray([n_photons]),
n_edge=np.asarray([len(senders)]),
nodes=photons,
edges=None,
globals=None,
senders=senders,
receivers=receivers)
# In order to jit compile our code, we have to pad the nodes and edges of
# the GraphsTuple to a static shape.
graph = jraph.pad_with_graphs(graph, max_n_photons + 1,
max_n_photons * max_n_photons)
return Problem(graph=graph, labels=label)
def network_definition(
graph: jraph.GraphsTuple) -> jraph.ArrayTree:
"""Defines a graph neural network.
Args:
graph: GraphsTuple the network processes.
Returns:
globals.
"""
@jax.vmap
@jraph.concatenated_args
def update_edge_fn(features):
return hk.nets.MLP([30, 30, 30])(features)
# The correct solution for the edge update function is the invariant mass
# of the photon pair.
# The simple MLP we use here seems to fail to find the correct solution.
# You can check that the example works in principle by replacing
# `update_edge_fn` above with the analytical solution below.
@jax.vmap
def unused_update_edge_fn_solution(s, r):
"""Calculates invariant mass of photon pair and compares to Higgs mass."""
t = (s + r)**2
return jnp.array(jnp.abs(t[0] - t[1] - t[2] - t[3] - 125.18**2) < 1,
dtype=jnp.float32)[None]
gn = jraph.RelationNetwork(
update_edge_fn=update_edge_fn,
update_global_fn=hk.nets.MLP([2]),
aggregate_edges_for_globals_fn=jraph.segment_sum,
)
graph = gn(graph)
return graph.globals
def train(num_steps: int):
"""Trains a graph neural network on an electronic voting problem."""
train_dataset = (2, 15)
test_dataset = (16, 20)
random.seed(42)
network = hk.without_apply_rng(hk.transform(network_definition))
problem = get_higgs_problem(*train_dataset)
params = network.init(jax.random.PRNGKey(42), problem.graph)
@jax.jit
def prediction_loss(params, problem):
globals_ = network.apply(params, problem.graph)
# We interpret the globals as logits for the detection.
# Only the first graph is real, the second graph is for padding.
log_prob = jax.nn.log_softmax(globals_[0]) * problem.labels
return -jnp.sum(log_prob)
@jax.jit
def accuracy_loss(params, problem):
globals_ = network.apply(params, problem.graph)
# We interpret the globals as logits for the detection.
# Only the first graph is real, the second graph is for padding.
equal = jnp.argmax(globals_[0]) == jnp.argmax(problem.labels)
return equal.astype(np.int32)
opt_init, opt_update = optax.adam(2e-4)
opt_state = opt_init(params)
@jax.jit
def update(params, opt_state, problem):
g = jax.grad(prediction_loss)(params, problem)
updates, opt_state = opt_update(g, opt_state)
return optax.apply_updates(params, updates), opt_state
for step in range(num_steps):
problem = get_higgs_problem(*train_dataset)
params, opt_state = update(params, opt_state, problem)
if step % 1000 == 0:
train_loss = jnp.mean(
jnp.asarray([
accuracy_loss(params, get_higgs_problem(*train_dataset))
for _ in range(100)
])).item()
test_loss = jnp.mean(
jnp.asarray([
accuracy_loss(params, get_higgs_problem(*test_dataset))
for _ in range(100)
])).item()
logging.info("step %r loss train %r test %r", step, train_loss, test_loss)
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
train(num_steps=10000)
if __name__ == "__main__":
app.run(main)
| jraph-master | jraph/examples/higgs_detection.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Zachary's karate club example.
Here we train a graph neural network to process Zachary's karate club.
https://en.wikipedia.org/wiki/Zachary%27s_karate_club
Zachary's karate club is used in the literature as an example of a social graph.
Here we use a graphnet to optimize the assignments of the students in the
karate club to two distinct karate instructors (Mr. Hi and John A).
"""
import logging
from absl import app
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import optax
def get_zacharys_karate_club() -> jraph.GraphsTuple:
"""Returns GraphsTuple representing Zachary's karate club."""
social_graph = [
(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2),
(4, 0), (5, 0), (6, 0), (6, 4), (6, 5), (7, 0), (7, 1),
(7, 2), (7, 3), (8, 0), (8, 2), (9, 2), (10, 0), (10, 4),
(10, 5), (11, 0), (12, 0), (12, 3), (13, 0), (13, 1), (13, 2),
(13, 3), (16, 5), (16, 6), (17, 0), (17, 1), (19, 0), (19, 1),
(21, 0), (21, 1), (25, 23), (25, 24), (27, 2), (27, 23),
(27, 24), (28, 2), (29, 23), (29, 26), (30, 1), (30, 8),
(31, 0), (31, 24), (31, 25), (31, 28), (32, 2), (32, 8),
(32, 14), (32, 15), (32, 18), (32, 20), (32, 22), (32, 23),
(32, 29), (32, 30), (32, 31), (33, 8), (33, 9), (33, 13),
(33, 14), (33, 15), (33, 18), (33, 19), (33, 20), (33, 22),
(33, 23), (33, 26), (33, 27), (33, 28), (33, 29), (33, 30),
(33, 31), (33, 32)]
# Add reverse edges.
social_graph += [(edge[1], edge[0]) for edge in social_graph]
n_club_members = 34
return jraph.GraphsTuple(
n_node=jnp.asarray([n_club_members]),
n_edge=jnp.asarray([len(social_graph)]),
# One-hot encoding for nodes.
nodes=jnp.eye(n_club_members),
# No edge features.
edges=None,
globals=None,
senders=jnp.asarray([edge[0] for edge in social_graph]),
receivers=jnp.asarray([edge[1] for edge in social_graph]))
def get_ground_truth_assignments_for_zacharys_karate_club() -> jnp.ndarray:
"""Returns ground truth assignments for Zachary's karate club."""
return jnp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1,
0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
def network_definition(graph: jraph.GraphsTuple) -> jraph.ArrayTree:
"""Implements the GCN from Kipf et al https://arxiv.org/pdf/1609.02907.pdf.
A' = D^{-0.5} A D^{-0.5}
Z = f(X, A') = A' relu(A' X W_0) W_1
Args:
graph: GraphsTuple the network processes.
Returns:
processed nodes.
"""
gn = jraph.GraphConvolution(
update_node_fn=hk.Linear(5, with_bias=False),
add_self_edges=True)
graph = gn(graph)
graph = graph._replace(nodes=jax.nn.relu(graph.nodes))
gn = jraph.GraphConvolution(
update_node_fn=hk.Linear(2, with_bias=False))
graph = gn(graph)
return graph.nodes
def optimize_club(num_steps: int):
"""Solves the karte club problem by optimizing the assignments of students."""
network = hk.without_apply_rng(hk.transform(network_definition))
zacharys_karate_club = get_zacharys_karate_club()
labels = get_ground_truth_assignments_for_zacharys_karate_club()
params = network.init(jax.random.PRNGKey(42), zacharys_karate_club)
@jax.jit
def prediction_loss(params):
decoded_nodes = network.apply(params, zacharys_karate_club)
# We interpret the decoded nodes as a pair of logits for each node.
log_prob = jax.nn.log_softmax(decoded_nodes)
# The only two assignments we know a-priori are those of Mr. Hi (Node 0)
# and John A (Node 33).
return -(log_prob[0, 0] + log_prob[33, 1])
opt_init, opt_update = optax.adam(1e-2)
opt_state = opt_init(params)
@jax.jit
def update(params, opt_state):
g = jax.grad(prediction_loss)(params)
updates, opt_state = opt_update(g, opt_state)
return optax.apply_updates(params, updates), opt_state
@jax.jit
def accuracy(params):
decoded_nodes = network.apply(params, zacharys_karate_club)
return jnp.mean(jnp.argmax(decoded_nodes, axis=1) == labels)
for step in range(num_steps):
logging.info("step %r accuracy %r", step, accuracy(params).item())
params, opt_state = update(params, opt_state)
def main(_):
optimize_club(num_steps=30)
if __name__ == "__main__":
app.run(main)
| jraph-master | jraph/examples/zacharys_karate_club.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Conway's game of life using jraph."""
import time
from absl import app
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
def conway_mlp(x):
"""Implements a MLP representing Conway's game of life rules."""
w = jnp.array([[0.0, -1.0], [0.0, 1.0], [0.0, 1.0],
[0, -1.0], [1.0, 1.0], [1.0, 1.0]])
b = jnp.array([3.5, -3.5, -1.5, 1.5, -2.5, -3.5])
h = jnp.maximum(jnp.dot(w, x) + b, 0.)
w = jnp.array([[2.0, -4.0, 2.0, -4.0, 2.0, -4.0]])
b = jnp.array([-4.0])
y = jnp.maximum(jnp.dot(w, h) + b, 0.0)
return y
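# Note: when `conway_mlp` is used as the node update below, its input `x` is
# the concatenation [own_state, number_of_live_neighbours]. A couple of spot
# checks of the rules: conway_mlp(jnp.array([1., 2.])) evaluates to 1. (a live
# cell with two live neighbours survives), conway_mlp(jnp.array([1., 1.]))
# evaluates to 0. (a live cell with a single neighbour dies), and
# conway_mlp(jnp.array([0., 3.])) evaluates to 1. (a dead cell with three live
# neighbours becomes alive).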
def conway_graph(size) -> jraph.GraphsTuple:
"""Returns a graph representing the game field of conway's game of life."""
# Creates nodes: each node represents a cell in the game.
n_node = size**2
nodes = np.zeros((n_node, 1))
node_indices = jnp.arange(n_node)
# Creates edges, senders and receivers:
# the senders represent the connections to the 8 neighboring fields.
n_edge = 8 * n_node
edges = jnp.zeros((n_edge, 1))
senders = jnp.vstack(
[node_indices - size - 1, node_indices - size, node_indices - size + 1,
node_indices - 1, node_indices + 1,
node_indices + size - 1, node_indices + size, node_indices + size + 1])
senders = senders.T.reshape(-1)
senders = (senders + size**2) % size**2
receivers = jnp.repeat(node_indices, 8)
# Adds a glider to the game
nodes[0, 0] = 1.0
nodes[1, 0] = 1.0
nodes[2, 0] = 1.0
nodes[2 + size, 0] = 1.0
nodes[1 + 2 * size, 0] = 1.0
return jraph.GraphsTuple(n_node=jnp.array([n_node]),
n_edge=jnp.array([n_edge]),
nodes=jnp.asarray(nodes),
edges=edges,
globals=None,
senders=senders,
receivers=receivers)
def display_graph(graph: jraph.GraphsTuple):
"""Prints the nodes of the graph representing Conway's game of life."""
size = int(np.sqrt(np.sum(graph.n_node)))
def _display_node(node):
if node == 1.0:
return 'x'
else:
return ' '
nodes = graph.nodes.copy()
output = '\n'.join(
''.join(_display_node(nodes[i * size + j][0])
for j in range(size))
for i in range(size))
print('-' * size + '\n' + output)
def main(_):
def net_fn(graph: jraph.GraphsTuple):
unf = jraph.concatenated_args(conway_mlp)
net = jraph.InteractionNetwork(
update_edge_fn=lambda e, n_s, n_r: n_s,
update_node_fn=jax.vmap(unf))
return net(graph)
net = hk.without_apply_rng(hk.transform(net_fn))
cg = conway_graph(size=20)
params = net.init(jax.random.PRNGKey(42), cg)
for _ in range(100):
time.sleep(0.05)
cg = jax.jit(net.apply)(params, cg)
display_graph(cg)
if __name__ == '__main__':
app.run(main)
| jraph-master | jraph/examples/game_of_life.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A basic graphnet example.
This example just explains the bare mechanics of the library.
"""
import logging
from absl import app
import jax
import jraph
import numpy as np
MASK_BROKEN_MSG = ("Support for jax.mask is currently broken. This is not a "
"jraph error.")
def run():
"""Runs basic example."""
# Creating graph tuples.
# Creates a GraphsTuple from scratch containing a single graph.
# The graph has 3 nodes and 2 edges.
# Each node has a 4-dimensional feature vector.
# Each edge has a 5-dimensional feature vector.
# The graph itself has a 6-dimensional feature vector.
single_graph = jraph.GraphsTuple(
n_node=np.asarray([3]), n_edge=np.asarray([2]),
nodes=np.ones((3, 4)), edges=np.ones((2, 5)),
globals=np.ones((1, 6)),
senders=np.array([0, 1]), receivers=np.array([2, 2]))
logging.info("Single graph %r", single_graph)
# Creates a GraphsTuple from scratch containing a single graph with nested
# feature vectors.
# The graph has 3 nodes and 2 edges.
# The feature vector can be arbitrary nested types of dict, list and tuple,
# or any other type you registered with jax.tree_util.register_pytree_node.
nested_graph = jraph.GraphsTuple(
n_node=np.asarray([3]), n_edge=np.asarray([2]),
nodes={"a": np.ones((3, 4))}, edges={"b": np.ones((2, 5))},
globals={"c": np.ones((1, 6))},
senders=np.array([0, 1]), receivers=np.array([2, 2]))
logging.info("Nested graph %r", nested_graph)
# Creates a GraphsTuple from scratch containing 2 graphs using an implicit
# batch dimension.
# The first graph has 3 nodes and 2 edges.
# The second graph has 1 node and 1 edge.
# Each node has a 4-dimensional feature vector.
# Each edge has a 5-dimensional feature vector.
# The graph itself has a 6-dimensional feature vector.
implicitly_batched_graph = jraph.GraphsTuple(
n_node=np.asarray([3, 1]), n_edge=np.asarray([2, 1]),
nodes=np.ones((4, 4)), edges=np.ones((3, 5)),
globals=np.ones((2, 6)),
senders=np.array([0, 1, 3]), receivers=np.array([2, 2, 3]))
logging.info("Implicitly batched graph %r", implicitly_batched_graph)
# Batching graphs can be challenging. There are in general two approaches:
# 1. Implicit batching: Independent graphs are combined into the same
# GraphsTuple first, and the padding is added to the combined graph.
# 2. Explicit batching: Pad all graphs to a maximum size, stack them together
# using an explicit batch dimension followed by jax.vmap.
# Both approaches are shown below.
# Creates a GraphsTuple from two existing GraphsTuple using an implicit
# batch dimension.
# The GraphsTuple will contain three graphs.
implicitly_batched_graph = jraph.batch(
[single_graph, implicitly_batched_graph])
logging.info("Implicitly batched graph %r", implicitly_batched_graph)
# Creates multiple GraphsTuples from an existing GraphsTuple with an implicit
# batch dimension.
graph_1, graph_2, graph_3 = jraph.unbatch(implicitly_batched_graph)
logging.info("Unbatched graphs %r %r %r", graph_1, graph_2, graph_3)
# Creates a padded GraphsTuple from an existing GraphsTuple.
# The padded GraphsTuple will contain 10 nodes, 5 edges, and 4 graphs.
# Three graphs are added for the padding.
# First, a dummy graph which contains the padding nodes and edges, and then
# two empty graphs without nodes or edges to pad out the number of graphs.
padded_graph = jraph.pad_with_graphs(
single_graph, n_node=10, n_edge=5, n_graph=4)
logging.info("Padded graph %r", padded_graph)
# Creates a GraphsTuple from an existing padded GraphsTuple.
# The previously added padding is removed.
single_graph = jraph.unpad_with_graphs(padded_graph)
logging.info("Unpadded graph %r", single_graph)
# Creates a GraphsTuple containing 2 graphs using an explicit batch
# dimension.
# An explicit batch dimension requires more memory, but can simplify
# the definition of functions operating on the graph.
# Explicitly batched graphs require the GraphNetwork to be transformed
# by jax.vmap.
# Using an explicit batch requires padding all feature vectors to
# the maximum size of nodes and edges.
# The first graph has 3 nodes and 2 edges.
# The second graph has 1 node and 1 edge.
# Each node has a 4-dimensional feature vector.
# Each edge has a 5-dimensional feature vector.
# The graph itself has a 6-dimensional feature vector.
explicitly_batched_graph = jraph.GraphsTuple(
n_node=np.asarray([[3], [1]]), n_edge=np.asarray([[2], [1]]),
nodes=np.ones((2, 3, 4)), edges=np.ones((2, 2, 5)),
globals=np.ones((2, 1, 6)),
senders=np.array([[0, 1], [0, -1]]),
receivers=np.array([[2, 2], [0, -1]]))
logging.info("Explicitly batched graph %r", explicitly_batched_graph)
# Running a graph propagation step.
# First define the update functions for the edges, nodes and globals.
# In this example we use the identity everywhere.
# For Graph neural networks, each update function is typically a neural
# network.
def update_edge_fn(
edge_features,
sender_node_features,
receiver_node_features,
globals_):
"""Returns the update edge features."""
del sender_node_features
del receiver_node_features
del globals_
return edge_features
def update_node_fn(
node_features,
aggregated_sender_edge_features,
aggregated_receiver_edge_features,
globals_):
"""Returns the update node features."""
del aggregated_sender_edge_features
del aggregated_receiver_edge_features
del globals_
return node_features
def update_globals_fn(
aggregated_node_features,
aggregated_edge_features,
globals_):
"""Returns the global features."""
del aggregated_node_features
del aggregated_edge_features
return globals_
# Optionally define custom aggregation functions.
# In this example we use the defaults (so no need to define them explicitly).
aggregate_edges_for_nodes_fn = jraph.segment_sum
aggregate_nodes_for_globals_fn = jraph.segment_sum
aggregate_edges_for_globals_fn = jraph.segment_sum
# Optionally define an attention logit function and an attention reduce
# function. This can be used for graph attention.
# The attention function calculates attention weights, and the apply
# attention function calculates the new edge feature given the weights.
# We don't use graph attention here, and just pass the defaults.
attention_logit_fn = None
attention_reduce_fn = None
# Creates a new GraphNetwork in its most general form.
# Most of the arguments have defaults and can be omitted if a feature
# is not used.
# There are also predefined GraphNetworks available (see models.py)
network = jraph.GraphNetwork(
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn,
update_global_fn=update_globals_fn,
attention_logit_fn=attention_logit_fn,
aggregate_edges_for_nodes_fn=aggregate_edges_for_nodes_fn,
aggregate_nodes_for_globals_fn=aggregate_nodes_for_globals_fn,
aggregate_edges_for_globals_fn=aggregate_edges_for_globals_fn,
attention_reduce_fn=attention_reduce_fn)
# Runs graph propagation on (implicitly batched) graphs.
updated_graph = network(single_graph)
logging.info("Updated graph from single graph %r", updated_graph)
updated_graph = network(nested_graph)
logging.info("Updated graph from nested graph %r", nested_graph)
updated_graph = network(implicitly_batched_graph)
logging.info("Updated graph from implicitly batched graph %r", updated_graph)
updated_graph = network(padded_graph)
logging.info("Updated graph from padded graph %r", updated_graph)
# JIT-compile graph propagation.
# Use padded graphs to avoid re-compilation at every step!
jitted_network = jax.jit(network)
updated_graph = jitted_network(padded_graph)
logging.info("(JIT) updated graph from padded graph %r", updated_graph)
logging.info("basic.py complete!")
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
run()
if __name__ == "__main__":
app.run(main)
| jraph-master | jraph/examples/basic.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of how to use recurrent networks (e.g.`LSTM`s) with `GraphNetwork`s.
Models can use the mechanism for specifying nested node, edge, or global
features to simultaneously keep inputs/embeddings together with a per-node,
per-edge or per-graph recurrent state.
In this example we show an `InteractionNetwork` that uses an LSTM to keep a
memory of the inputs to the edge model at each step of message passing, by using
separate "embedding" and "state" fields in the edge features.
Following a similar procedure, an LSTM could be added to the `node_update_fn`,
or even the `global_update_fn`, if using a full `GraphNetwork`.
Note it is recommended to use immutable container types to store nested edge,
node and global features to avoid unwanted side effects. In this example we
use `namedtuple`s.
"""
import collections
from absl import app
import haiku as hk
import jax
import jax.numpy as jnp
import jax.tree_util as tree
import jraph
import numpy as np
NUM_NODES = 5
NUM_EDGES = 7
NUM_MESSAGE_PASSING_STEPS = 10
EMBEDDING_SIZE = 32
HIDDEN_SIZE = 128
# Immutable class for storing nested node/edge features containing an embedding
# and a recurrent state.
StatefulField = collections.namedtuple("StatefulField", ["embedding", "state"])
def get_random_graph() -> jraph.GraphsTuple:
return jraph.GraphsTuple(
n_node=np.asarray([NUM_NODES]),
n_edge=np.asarray([NUM_EDGES]),
nodes=np.random.normal(size=[NUM_NODES, EMBEDDING_SIZE]),
edges=np.random.normal(size=[NUM_EDGES, EMBEDDING_SIZE]),
globals=None,
senders=np.random.randint(0, NUM_NODES, [NUM_EDGES]),
receivers=np.random.randint(0, NUM_NODES, [NUM_EDGES]))
def network_definition(graph: jraph.GraphsTuple) -> jraph.ArrayTree:
"""`InteractionNetwork` with an LSTM in the edge update."""
# LSTM that will keep a memory of the inputs to the edge model.
edge_fn_lstm = hk.LSTM(hidden_size=HIDDEN_SIZE)
# MLPs used in the edge and the node model. Note that in this instance
# the output size matches the input size so the same model can be run
# iteratively multiple times. In a real model, this would usually be achieved
# by first using an encoder to map the input data into a common `EMBEDDING_SIZE`.
edge_fn_mlp = hk.nets.MLP([HIDDEN_SIZE, EMBEDDING_SIZE])
node_fn_mlp = hk.nets.MLP([HIDDEN_SIZE, EMBEDDING_SIZE])
# Initialize the edge features to contain both the input edge embedding
# and initial LSTM state. Note for the nodes we only have an embedding since
# in this example nodes do not use a `node_fn_lstm`, but for analogy, we
# still put it in a `StatefulField`.
graph = graph._replace(
edges=StatefulField(
embedding=graph.edges,
state=edge_fn_lstm.initial_state(graph.edges.shape[0])),
nodes=StatefulField(embedding=graph.nodes, state=None),
)
def update_edge_fn(edges, sender_nodes, receiver_nodes):
# We will run an LSTM memory on the inputs first, and then
# process the output of the LSTM with an MLP.
edge_inputs = jnp.concatenate([edges.embedding,
sender_nodes.embedding,
receiver_nodes.embedding], axis=-1)
lstm_output, updated_state = edge_fn_lstm(edge_inputs, edges.state)
updated_edges = StatefulField(
embedding=edge_fn_mlp(lstm_output), state=updated_state,
)
return updated_edges
def update_node_fn(nodes, received_edges):
# Note `received_edges.state` will also contain the aggregated state for
# all received edges, which we may choose to use in the node update.
node_inputs = jnp.concatenate(
[nodes.embedding, received_edges.embedding], axis=-1)
updated_nodes = StatefulField(
embedding=node_fn_mlp(node_inputs),
state=None)
return updated_nodes
recurrent_graph_network = jraph.InteractionNetwork(
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
# Apply the model recurrently for 10 message passing steps.
# If instead we intended to use the LSTM to process a sequence of features
# for each node/edge, here we would select the corresponding inputs from the
# sequence along the sequence axis of the nodes/edges features to build the
# correct input graph for each step of the iteration.
for _ in range(NUM_MESSAGE_PASSING_STEPS):
graph = recurrent_graph_network(graph)
return graph
def main(_):
network = hk.without_apply_rng(hk.transform(network_definition))
input_graph = get_random_graph()
params = network.init(jax.random.PRNGKey(42), input_graph)
output_graph = network.apply(params, input_graph)
print(tree.tree_map(lambda x: x.shape, output_graph))
if __name__ == "__main__":
app.run(main)
| jraph-master | jraph/examples/lstm.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""2-SAT solver example.
Here we train a graph neural network to solve 2-sat problems.
https://en.wikipedia.org/wiki/2-satisfiability
For instance a 2 sat problem with 3 literals would look like this:
(a or b) and (not a or c) and (not b or not c)
We represent this problem in form of a bipartite-graph, with edges
connecting the literal-nodes (a, b, c) with the constraint-nodes (O).
The corresponding graph looks like this:
O O O
|\ /\ /|
| \/ \/ |
| /\ /\ |
|/ \/ \|
a b c
The nodes are one-hot encoded with literal nodes as (1, 0) and constraint nodes
as (0, 1). The edges are one-hot encoded with (1, 0) if the literal should be
true and (0, 1) if the literal should be false.
The graph neural network encodes the nodes and the edges and runs multiple
message passing steps by calculating a message for each edge and aggregating
all incoming messages at each node.
The training dataset consists of randomly generated 2-sat problems with 2 to 15
literals.
The test dataset consists of randomly generated 2-sat problems with 16 to 20
literals.
"""
import collections
import logging
import random
from absl import app
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
import optax
Problem = collections.namedtuple("Problem", ("graph", "labels", "mask"))
def get_2sat_problem(min_n_literals: int, max_n_literals: int) -> Problem:
"""Creates bipartite-graph representing a randomly generated 2-sat problem.
Args:
min_n_literals: minimum number of literals in the 2-sat problem.
max_n_literals: maximum number of literals in the 2-sat problem.
Returns:
bipartite-graph, node labels and node mask.
"""
n_literals = random.randint(min_n_literals, max_n_literals)
n_literals_true = random.randint(1, n_literals - 1)
n_constraints = n_literals * (n_literals - 1) // 2
n_node = n_literals + n_constraints
# 0 indicates a literal node
# 1 indicates a constraint node.
nodes = [0 if i < n_literals else 1 for i in range(n_node)]
edges = []
senders = []
for literal_node1 in range(n_literals):
for literal_node2 in range(literal_node1 + 1, n_literals):
senders.append(literal_node1)
senders.append(literal_node2)
# 1 indicates that the literal must be true for this constraint.
# 0 indicates that the literal must be false for this constraint.
# I.e. with literals a and b, we have the following possible constraints:
# 0, 0 -> not a or not b
# 1, 0 -> a or not b
# 0, 1 -> not a or b
# 1, 1 -> a or b
edges.append(1 if literal_node1 < n_literals_true else 0)
edges.append(1 if literal_node2 < n_literals_true else 0)
graph = jraph.GraphsTuple(
n_node=np.asarray([n_node]),
n_edge=np.asarray([2 * n_constraints]),
# One-hot encoding for nodes and edges.
nodes=np.eye(2)[nodes],
edges=np.eye(2)[edges],
globals=None,
senders=np.asarray(senders),
receivers=np.repeat(np.arange(n_constraints) + n_literals, 2))
# In order to jit compile our code, we have to pad the nodes and edges of
# the GraphsTuple to a static shape.
max_n_constraints = max_n_literals * (max_n_literals - 1) // 2
max_nodes = max_n_literals + max_n_constraints + 1
max_edges = 2 * max_n_constraints
graph = jraph.pad_with_graphs(graph, max_nodes, max_edges)
# The ground truth solution for the 2-sat problem.
labels = (np.arange(max_nodes) < n_literals_true).astype(np.int32)
labels = np.eye(2)[labels]
# For the loss calculation we create a mask for the nodes, which masks out
# the constraint nodes and the padding nodes.
mask = (np.arange(max_nodes) < n_literals).astype(np.int32)
return Problem(graph=graph, labels=labels, mask=mask)
def network_definition(
graph: jraph.GraphsTuple,
num_message_passing_steps: int = 5) -> jraph.ArrayTree:
"""Defines a graph neural network.
Args:
graph: GraphsTuple the network processes.
num_message_passing_steps: number of message passing steps.
Returns:
Decoded nodes.
"""
embedding = jraph.GraphMapFeatures(
embed_edge_fn=jax.vmap(hk.Linear(output_size=16)),
embed_node_fn=jax.vmap(hk.Linear(output_size=16)))
graph = embedding(graph)
@jax.vmap
@jraph.concatenated_args
def update_fn(features):
net = hk.Sequential([
hk.Linear(10), jax.nn.relu,
hk.Linear(10), jax.nn.relu,
hk.Linear(10), jax.nn.relu])
return net(features)
for _ in range(num_message_passing_steps):
gn = jraph.InteractionNetwork(
update_edge_fn=update_fn,
update_node_fn=update_fn,
include_sent_messages_in_node_update=True)
graph = gn(graph)
return hk.Linear(2)(graph.nodes)
def train(num_steps: int):
"""Trains a graph neural network on a 2-sat problem."""
train_dataset = (2, 15)
test_dataset = (16, 20)
random.seed(42)
network = hk.without_apply_rng(hk.transform(network_definition))
problem = get_2sat_problem(*train_dataset)
params = network.init(jax.random.PRNGKey(42), problem.graph)
@jax.jit
def prediction_loss(params, problem):
decoded_nodes = network.apply(params, problem.graph)
# We interpret the decoded nodes as a pair of logits for each node.
log_prob = jax.nn.log_softmax(decoded_nodes) * problem.labels
return -jnp.sum(log_prob * problem.mask[:, None]) / jnp.sum(problem.mask)
opt_init, opt_update = optax.adam(2e-4)
opt_state = opt_init(params)
@jax.jit
def update(params, opt_state, problem):
g = jax.grad(prediction_loss)(params, problem)
updates, opt_state = opt_update(g, opt_state)
return optax.apply_updates(params, updates), opt_state
for step in range(num_steps):
problem = get_2sat_problem(*train_dataset)
params, opt_state = update(params, opt_state, problem)
if step % 1000 == 0:
train_loss = jnp.mean(
jnp.asarray([
prediction_loss(params, get_2sat_problem(*train_dataset))
for _ in range(100)
])).item()
test_loss = jnp.mean(
jnp.asarray([
prediction_loss(params, get_2sat_problem(*test_dataset))
for _ in range(100)
])).item()
logging.info("step %r loss train %r test %r", step, train_loss, test_loss)
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
train(num_steps=10000)
if __name__ == "__main__":
app.run(main)
| jraph-master | jraph/examples/sat.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jraph.ogb_examples.train."""
import pathlib
from absl.testing import absltest
from jraph.ogb_examples import train
class TrainTest(absltest.TestCase):
def test_train_and_eval_overfit(self):
ogb_path = pathlib.Path(train.__file__).parents[0]
master_csv_path = pathlib.Path(ogb_path, 'test_data', 'master.csv')
split_path = pathlib.Path(ogb_path, 'test_data', 'train.csv.gz')
data_path = master_csv_path.parents[0]
temp_dir = self.create_tempdir().full_path
train.train(data_path, master_csv_path, split_path, 1, 101, temp_dir)
_, accuracy = train.evaluate(
data_path, master_csv_path, split_path, temp_dir)
self.assertEqual(accuracy, 1.0)
if __name__ == '__main__':
absltest.main()
| jraph-master | jraph/ogb_examples/train_test.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for graph.ogb_examples.data_utils."""
import pathlib
from absl.testing import absltest
from absl.testing import parameterized
import jraph
from jraph.ogb_examples import data_utils
import numpy as np
import tree
class DataUtilsTest(parameterized.TestCase):
def setUp(self):
super(DataUtilsTest, self).setUp()
self._test_graph = jraph.GraphsTuple(
nodes=np.broadcast_to(
np.arange(10, dtype=np.float32)[:, None], (10, 10)),
edges=np.concatenate((
np.broadcast_to(np.arange(20, dtype=np.float32)[:, None], (20, 4)),
np.broadcast_to(np.arange(20, dtype=np.float32)[:, None], (20, 4))
)),
receivers=np.concatenate((np.arange(20), np.arange(20))),
senders=np.concatenate((np.arange(20), np.arange(20))),
globals={'label': np.array([1], dtype=np.int32)},
n_node=np.array([10], dtype=np.int32),
n_edge=np.array([40], dtype=np.int32))
ogb_path = pathlib.Path(data_utils.__file__).parents[0]
master_csv_path = pathlib.Path(ogb_path, 'test_data', 'master.csv')
split_path = pathlib.Path(ogb_path, 'test_data', 'train.csv.gz')
data_path = master_csv_path.parents[0]
self._reader = data_utils.DataReader(
data_path=data_path,
master_csv_path=master_csv_path,
split_path=split_path)
def test_total_num_graph(self):
self.assertEqual(self._reader.total_num_graphs, 1)
def test_expected_graph(self):
graph = next(self._reader)
with self.subTest('test_graph_equality'):
tree.map_structure(
np.testing.assert_almost_equal, graph, self._test_graph)
with self.subTest('stop_iteration'):
# One element in the dataset, so should have stop iteration.
with self.assertRaises(StopIteration):
next(self._reader)
def test_reader_repeat(self):
self._reader.repeat()
next(self._reader)
graph = next(self._reader)
# One graph in the test dataset so should be the same.
tree.map_structure(np.testing.assert_almost_equal, graph, self._test_graph)
if __name__ == '__main__':
absltest.main()
| jraph-master | jraph/ogb_examples/data_utils_test.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jraph.ogb_examples.train_pmap."""
import pathlib
from absl.testing import absltest
from jraph.ogb_examples import train_pmap
class TrainTest(absltest.TestCase):
def test_train_and_eval_overfit(self):
ogb_path = pathlib.Path(train_pmap.__file__).parents[0]
master_csv_path = pathlib.Path(ogb_path, 'test_data', 'master.csv')
split_path = pathlib.Path(ogb_path, 'test_data', 'train.csv.gz')
data_path = master_csv_path.parents[0]
temp_dir = self.create_tempdir().full_path
train_pmap.train(data_path, master_csv_path, split_path, 1, 101, temp_dir)
_, accuracy = train_pmap.evaluate(data_path, master_csv_path, split_path,
temp_dir)
self.assertEqual(float(accuracy), 1.0)
if __name__ == '__main__':
absltest.main()
| jraph-master | jraph/ogb_examples/train_pmap_test.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loading utils for the Open Graph Benchmark (OGB) Mol-Hiv."""
import pathlib
import jraph
import numpy as np
import pandas as pd
class DataReader:
"""Data Reader for Open Graph Benchmark datasets."""
def __init__(self,
data_path,
master_csv_path,
split_path,
batch_size=1,
dynamically_batch=False):
"""Initializes the data reader by loading in data."""
with pathlib.Path(master_csv_path).open("rt") as fp:
self._dataset_info = pd.read_csv(fp, index_col=0)["ogbg-molhiv"]
self._data_path = pathlib.Path(data_path)
# Load edge information, and transpose into (senders, receivers).
with pathlib.Path(data_path, "edge.csv.gz").open("rb") as fp:
sender_receivers = pd.read_csv(
fp, compression="gzip", header=None).values.T.astype(np.int64)
self._senders = sender_receivers[0]
self._receivers = sender_receivers[1]
# Load n_node and n_edge
with pathlib.Path(data_path, "num-node-list.csv.gz").open("rb") as fp:
self._n_node = pd.read_csv(fp, compression="gzip", header=None)
self._n_node = self._n_node.astype(np.int64)[0].tolist()
with pathlib.Path(data_path, "num-edge-list.csv.gz").open("rb") as fp:
self._n_edge = pd.read_csv(fp, compression="gzip", header=None)
self._n_edge = self._n_edge.astype(np.int64)[0].tolist()
# Load node features
with pathlib.Path(data_path, "node-feat.csv.gz").open("rb") as fp:
self._nodes = pd.read_csv(
fp, compression="gzip", header=None).astype(np.float32).values
with pathlib.Path(data_path, "edge-feat.csv.gz").open("rb") as fp:
self._edges = pd.read_csv(
fp, compression="gzip", header=None).astype(np.float32).values
with pathlib.Path(data_path, "graph-label.csv.gz").open("rb") as fp:
self._labels = pd.read_csv(
fp, compression="gzip", header=None).values
with pathlib.Path(split_path).open("rb") as fp:
self._split_idx = pd.read_csv(
fp, compression="gzip", header=None).values.T[0]
self._repeat = False
self._batch_size = batch_size
self._generator = self._make_generator()
self._max_nodes = int(np.max(self._n_node))
self._max_edges = int(np.max(self._n_edge))
if dynamically_batch:
self._generator = jraph.dynamically_batch(
self._generator,
# Plus one for the extra padding node.
n_node=self._batch_size * (self._max_nodes) + 1,
# Times two because we want backwards edges.
n_edge=self._batch_size * (self._max_edges) * 2,
n_graph=self._batch_size + 1)
# If n_node = [1,2,3], we create accumulated n_node [0,1,3,6] for indexing.
self._accumulated_n_nodes = np.concatenate((np.array([0]),
np.cumsum(self._n_node)))
# Same for n_edge
self._accumulated_n_edges = np.concatenate((np.array([0]),
np.cumsum(self._n_edge)))
self._dynamically_batch = dynamically_batch
@property
def total_num_graphs(self):
return len(self._n_node)
def repeat(self):
self._repeat = True
def __iter__(self):
return self
def __next__(self):
graphs = []
if self._dynamically_batch:
# If we are using pmap we need each batch to have the same size in both
# number of nodes and number of edges, so we use `jraph.dynamically_batch`,
# which guarantees this.
return next(self._generator)
else:
for _ in range(self._batch_size):
graph = next(self._generator)
graphs.append(graph)
return jraph.batch(graphs)
def get_graph_by_idx(self, idx):
"""Gets a graph by an integer index."""
# Gather the graph information
label = self._labels[idx]
n_node = self._n_node[idx]
n_edge = self._n_edge[idx]
node_slice = slice(
self._accumulated_n_nodes[idx], self._accumulated_n_nodes[idx+1])
edge_slice = slice(
self._accumulated_n_edges[idx], self._accumulated_n_edges[idx+1])
nodes = self._nodes[node_slice]
edges = self._edges[edge_slice]
senders = self._senders[edge_slice]
receivers = self._receivers[edge_slice]
# Molecular graphs are bidirectional, but the serialization only
# stores one direction, so we add the reverse edges.
return jraph.GraphsTuple(
nodes=nodes,
edges=np.concatenate([edges, edges]),
n_node=np.array([n_node]),
n_edge=np.array([n_edge*2]),
senders=np.concatenate([senders, receivers]),
receivers=np.concatenate([receivers, senders]),
globals={"label": label})
def _make_generator(self):
"""Makes a single example generator of the loaded OGB data."""
idx = 0
while True:
# If not repeating, exit when we've cycled through all the graphs.
# Only return graphs within the split.
if not self._repeat:
if idx == self.total_num_graphs:
return
else:
# This will reset the index to 0 if we are at the end of the dataset.
idx = idx % self.total_num_graphs
if idx not in self._split_idx:
idx += 1
continue
graph = self.get_graph_by_idx(idx)
idx += 1
yield graph
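# Example usage (a minimal sketch; the paths below are placeholders that must
# point to a downloaded ogbg-molhiv dataset and split file):
#
#   reader = DataReader(
#       data_path="/path/to/ogbg_molhiv/raw",
#       master_csv_path="/path/to/master.csv",
#       split_path="/path/to/train.csv.gz",
#       batch_size=32,
#       dynamically_batch=True)
#   first_batch = next(reader)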
| jraph-master | jraph/ogb_examples/data_utils.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Example training script for training OGB molhiv with jax graph-nets.
The ogbg-molhiv dataset is a molecular property prediction dataset.
It is adopted from the MoleculeNet [1]. All the molecules are pre-processed
using RDKit [2].
Each graph represents a molecule, where nodes are atoms, and edges are chemical
bonds. Input node features are 9-dimensional, containing atomic number and
chirality, as well as other additional atom features such as formal charge and
whether the atom is in the ring or not.
The goal is to predict whether a molecule inhibits HIV virus replication or not.
Performance is measured in ROC-AUC.
This script uses a GraphNet to learn the prediction task.
[1] Zhenqin Wu, Bharath Ramsundar, Evan N Feinberg, Joseph Gomes,
Caleb Geniesse, Aneesh S. Pappu, Karl Leswing, and Vijay Pande.
Moleculenet: a benchmark for molecular machine learning.
Chemical Science, 9(2):513–530, 2018.
[2] Greg Landrum et al. RDKit: Open-source cheminformatics, 2006.
Example usage:
python3 train.py --data_path={DATA_PATH} --master_csv_path={MASTER_CSV_PATH} \
--save_dir={SAVE_DIR} --split_path={SPLIT_PATH}
"""
import functools
import logging
import pathlib
import pickle
from absl import app
from absl import flags
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
from jraph.ogb_examples import data_utils
import optax
flags.DEFINE_string('data_path', None, 'Directory of the data.')
flags.DEFINE_string('split_path', None, 'Path to the data split indices.')
flags.DEFINE_string('master_csv_path', None, 'Path to OGB master.csv.')
flags.DEFINE_string('save_dir', None, 'Directory to save parameters to.')
flags.DEFINE_integer('batch_size', 1, 'Number of graphs in batch.')
flags.DEFINE_integer('num_training_steps', 1000, 'Number of training steps.')
flags.DEFINE_enum('mode', 'train', ['train', 'evaluate'], 'Train or evaluate.')
FLAGS = flags.FLAGS
@jraph.concatenated_args
def edge_update_fn(feats: jnp.ndarray) -> jnp.ndarray:
"""Edge update function for graph net."""
net = hk.Sequential(
[hk.Linear(128), jax.nn.relu,
hk.Linear(128)])
return net(feats)
@jraph.concatenated_args
def node_update_fn(feats: jnp.ndarray) -> jnp.ndarray:
"""Node update function for graph net."""
net = hk.Sequential(
[hk.Linear(128), jax.nn.relu,
hk.Linear(128)])
return net(feats)
@jraph.concatenated_args
def update_global_fn(feats: jnp.ndarray) -> jnp.ndarray:
"""Global update function for graph net."""
# Molhiv is a binary classification task, so output pos neg logits.
net = hk.Sequential(
[hk.Linear(128), jax.nn.relu,
hk.Linear(2)])
return net(feats)
def net_fn(graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Graph net function."""
# Add a global paramater for graph classification.
graph = graph._replace(globals=jnp.zeros([graph.n_node.shape[0], 1]))
embedder = jraph.GraphMapFeatures(
hk.Linear(128), hk.Linear(128), hk.Linear(128))
net = jraph.GraphNetwork(
update_node_fn=node_update_fn,
update_edge_fn=edge_update_fn,
update_global_fn=update_global_fn)
return net(embedder(graph))
def _nearest_bigger_power_of_two(x: int) -> int:
"""Computes the nearest power of two greater than x for padding."""
y = 2
while y < x:
y *= 2
return y
def pad_graph_to_nearest_power_of_two(
graphs_tuple: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Pads a batched `GraphsTuple` to the nearest power of two.
For example, if a `GraphsTuple` has 7 nodes, 5 edges and 3 graphs, this method
would pad the `GraphsTuple` nodes and edges:
7 nodes --> 8 nodes (2^3)
5 edges --> 8 edges (2^3)
And since padding is accomplished using `jraph.pad_with_graphs`, an extra
graph and node is added:
8 nodes --> 9 nodes
3 graphs --> 4 graphs
Args:
graphs_tuple: a batched `GraphsTuple` (can be batch size 1).
Returns:
A graphs_tuple batched to the nearest power of two.
"""
# Add 1 since we need at least one padding node for pad_with_graphs.
pad_nodes_to = _nearest_bigger_power_of_two(jnp.sum(graphs_tuple.n_node)) + 1
pad_edges_to = _nearest_bigger_power_of_two(jnp.sum(graphs_tuple.n_edge))
# Add 1 since we need at least one padding graph for pad_with_graphs.
# We do not pad to nearest power of two because the batch size is fixed.
pad_graphs_to = graphs_tuple.n_node.shape[0] + 1
return jraph.pad_with_graphs(graphs_tuple, pad_nodes_to, pad_edges_to,
pad_graphs_to)
def compute_loss(params, graph, label, net):
"""Computes loss."""
pred_graph = net.apply(params, graph)
preds = jax.nn.log_softmax(pred_graph.globals)
targets = jax.nn.one_hot(label, 2)
# Since we have an extra 'dummy' graph in our batch due to padding, we want
# to mask out any loss associated with the dummy graph.
# Since we padded with `pad_with_graphs` we can recover the mask by using
# get_graph_padding_mask.
mask = jraph.get_graph_padding_mask(pred_graph)
# Cross entropy loss.
loss = -jnp.mean(preds * targets * mask[:, None])
# Accuracy taking into account the mask.
accuracy = jnp.sum(
(jnp.argmax(pred_graph.globals, axis=1) == label) * mask)/jnp.sum(mask)
return loss, accuracy
def train(data_path, master_csv_path, split_path, batch_size,
num_training_steps, save_dir):
"""OGB Training Script."""
# Initialize the dataset reader.
reader = data_utils.DataReader(
data_path=data_path,
master_csv_path=master_csv_path,
split_path=split_path,
batch_size=batch_size)
# Repeat the dataset forever for training.
reader.repeat()
# Transform impure `net_fn` to pure functions with hk.transform.
net = hk.without_apply_rng(hk.transform(net_fn))
# Get a candidate graph and label to initialize the network.
graph = reader.get_graph_by_idx(0)
# Initialize the network.
logging.info('Initializing network.')
params = net.init(jax.random.PRNGKey(42), graph)
# Initialize the optimizer.
opt_init, opt_update = optax.adam(1e-4)
opt_state = opt_init(params)
compute_loss_fn = functools.partial(compute_loss, net=net)
# We jit the computation of our loss, since this is the main computation.
# Using jax.jit means that we will use a single accelerator. If you want
# to use more than 1 accelerator, use jax.pmap. More information can be
# found in the jax documentation.
compute_loss_fn = jax.jit(jax.value_and_grad(
compute_loss_fn, has_aux=True))
for idx in range(num_training_steps):
graph = next(reader)
# Jax will re-jit your graphnet every time a new graph shape is encountered.
# In the limit, this means a new compilation every training step, which
# will result in *extremely* slow training. To prevent this, pad each
# batch of graphs to the nearest power of two. Since jax maintains a cache
# of compiled programs, the compilation cost is amortized.
graph = pad_graph_to_nearest_power_of_two(graph)
# Extract the label from the graph.
label = graph.globals['label']
graph = graph._replace(globals={})
(loss, acc), grad = compute_loss_fn(params, graph, label)
updates, opt_state = opt_update(grad, opt_state, params)
params = optax.apply_updates(params, updates)
if idx % 100 == 0:
logging.info('step: %s, loss: %s, acc: %s', idx, loss, acc)
if save_dir is not None:
with pathlib.Path(save_dir, 'molhiv.pkl').open('wb') as fp:
logging.info('Saving model to %s', save_dir)
pickle.dump(params, fp)
logging.info('Training finished')
def evaluate(data_path, master_csv_path, split_path, save_dir):
"""Evaluation Script."""
  logging.info('Evaluating OGB molhiv')
logging.info('Dataset split: %s', split_path)
# Initialize the dataset reader.
reader = data_utils.DataReader(
data_path=data_path,
master_csv_path=master_csv_path,
split_path=split_path,
batch_size=1)
# Transform impure `net_fn` to pure functions with hk.transform.
net = hk.without_apply_rng(hk.transform(net_fn))
with pathlib.Path(save_dir, 'molhiv.pkl').open('rb') as fp:
params = pickle.load(fp)
accumulated_loss = 0
accumulated_accuracy = 0
idx = 0
# We jit the computation of our loss, since this is the main computation.
# Using jax.jit means that we will use a single accelerator. If you want
# to use more than 1 accelerator, use jax.pmap. More information can be
# found in the jax documentation.
compute_loss_fn = jax.jit(functools.partial(compute_loss, net=net))
for graph in reader:
# Jax will re-jit your graphnet every time a new graph shape is encountered.
# In the limit, this means a new compilation every training step, which
# will result in *extremely* slow training. To prevent this, pad each
# batch of graphs to the nearest power of two. Since jax maintains a cache
# of compiled programs, the compilation cost is amortized.
graph = pad_graph_to_nearest_power_of_two(graph)
# Extract the labels and remove from the graph.
label = graph.globals['label']
graph = graph._replace(globals={})
loss, acc = compute_loss_fn(params, graph, label)
accumulated_accuracy += acc
accumulated_loss += loss
idx += 1
if idx % 100 == 0:
logging.info('Evaluated %s graphs', idx)
logging.info('Completed evaluation.')
loss = accumulated_loss / idx
accuracy = accumulated_accuracy / idx
logging.info('Eval loss: %s, accuracy %s', loss, accuracy)
return loss, accuracy
def main(_):
if FLAGS.mode == 'train':
train(FLAGS.data_path, FLAGS.master_csv_path, FLAGS.split_path,
FLAGS.batch_size, FLAGS.num_training_steps, FLAGS.save_dir)
elif FLAGS.mode == 'evaluate':
evaluate(FLAGS.data_path, FLAGS.master_csv_path, FLAGS.split_path,
FLAGS.save_dir)
if __name__ == '__main__':
app.run(main)
| jraph-master | jraph/ogb_examples/train.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jraph.ogb_examples.train_flax."""
import pathlib
from absl.testing import absltest
from jraph.ogb_examples import train_flax
class TrainTest(absltest.TestCase):
def test_train_and_eval_overfit(self):
ogb_path = pathlib.Path(train_flax.__file__).parents[0]
master_csv_path = pathlib.Path(ogb_path, 'test_data', 'master.csv')
split_path = pathlib.Path(ogb_path, 'test_data', 'train.csv.gz')
data_path = master_csv_path.parents[0]
temp_dir = self.create_tempdir().full_path
train_flax.train(data_path, master_csv_path, split_path, 1, 101, temp_dir)
_, accuracy = train_flax.evaluate(
data_path, master_csv_path, split_path, temp_dir)
self.assertEqual(accuracy, 1.0)
if __name__ == '__main__':
absltest.main()
| jraph-master | jraph/ogb_examples/train_flax_test.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Example training script for training OGB molhiv with jax graph-nets & flax.
The ogbg-molhiv dataset is a molecular property prediction dataset.
It is adopted from the MoleculeNet [1]. All the molecules are pre-processed
using RDKit [2].
Each graph represents a molecule, where nodes are atoms, and edges are chemical
bonds. Input node features are 9-dimensional, containing atomic number and
chirality, as well as other additional atom features such as formal charge and
whether the atom is in the ring or not.
The goal is to predict whether a molecule inhibits HIV virus replication or not.
Performance is measured in ROC-AUC.
This script uses a GraphNet to learn the prediction task.
[1] Zhenqin Wu, Bharath Ramsundar, Evan N Feinberg, Joseph Gomes,
Caleb Geniesse, Aneesh S. Pappu, Karl Leswing, and Vijay Pande.
Moleculenet: a benchmark for molecular machine learning.
Chemical Science, 9(2):513–530, 2018.
[2] Greg Landrum et al. RDKit: Open-source cheminformatics, 2006.
Example usage:
python3 train.py --data_path={DATA_PATH} --master_csv_path={MASTER_CSV_PATH} \
--save_dir={SAVE_DIR} --split_path={SPLIT_PATH}
"""
import functools
import logging
import pathlib
import pickle
from typing import Sequence
from absl import app
from absl import flags
from flax import linen as nn
from flax import optim
import jax
import jax.numpy as jnp
import jraph
from jraph.ogb_examples import data_utils
flags.DEFINE_string('data_path', None, 'Directory of the data.')
flags.DEFINE_string('split_path', None, 'Path to the data split indices.')
flags.DEFINE_string('master_csv_path', None, 'Path to OGB master.csv.')
flags.DEFINE_string('save_dir', None, 'Directory to save parameters to.')
flags.DEFINE_integer('batch_size', 1, 'Number of graphs in batch.')
flags.DEFINE_integer('num_training_steps', 1000, 'Number of training steps.')
flags.DEFINE_enum('mode', 'train', ['train', 'evaluate'], 'Train or evaluate.')
FLAGS = flags.FLAGS
class ExplicitMLP(nn.Module):
"""A flax MLP."""
features: Sequence[int]
@nn.compact
def __call__(self, inputs):
x = inputs
for i, lyr in enumerate([nn.Dense(feat) for feat in self.features]):
x = lyr(x)
if i != len(self.features) - 1:
x = nn.relu(x)
return x
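# Illustrative note (not part of the original module): ExplicitMLP with
# features=(128, 128) applies two Dense layers with a relu in between and no
# relu after the final layer, mirroring the Haiku MLPs used in train.py.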
# Functions must be passed to jraph GNNs, but pytype does not recognise
# linen Modules as callables, so here we wrap them in functions.
def make_embed_fn(latent_size):
def embed(inputs):
return nn.Dense(latent_size)(inputs)
return embed
def make_mlp(features):
@jraph.concatenated_args
def update_fn(inputs):
return ExplicitMLP(features)(inputs)
return update_fn
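# Note (added for clarity): `jraph.concatenated_args` concatenates all incoming
# feature arguments (e.g. edge, sender, receiver and global features) along
# their last axis, so the wrapped ExplicitMLP only ever sees a single array.
# A rough, hypothetical sketch of an equivalent update function:
#
#   def update_fn(*args):
#     return ExplicitMLP(features)(jnp.concatenate(args, axis=-1))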
class GraphNetwork(nn.Module):
"""A flax GraphNetwork."""
mlp_features: Sequence[int]
latent_size: int
@nn.compact
def __call__(self, graph):
# Add a global parameter for graph classification.
graph = graph._replace(globals=jnp.zeros([graph.n_node.shape[0], 1]))
embedder = jraph.GraphMapFeatures(
embed_node_fn=make_embed_fn(self.latent_size),
embed_edge_fn=make_embed_fn(self.latent_size),
embed_global_fn=make_embed_fn(self.latent_size))
net = jraph.GraphNetwork(
update_node_fn=make_mlp(self.mlp_features),
update_edge_fn=make_mlp(self.mlp_features),
# The global update outputs size 2 for binary classification.
update_global_fn=make_mlp(self.mlp_features + (2,))) # pytype: disable=unsupported-operands
return net(embedder(graph))
def _nearest_bigger_power_of_two(x: int) -> int:
"""Computes the nearest power of two greater than x for padding."""
y = 2
while y < x:
y *= 2
return y
def pad_graph_to_nearest_power_of_two(
graphs_tuple: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Pads a batched `GraphsTuple` to the nearest power of two.
For example, if a `GraphsTuple` has 7 nodes, 5 edges and 3 graphs, this method
would pad the `GraphsTuple` nodes and edges:
7 nodes --> 8 nodes (2^3)
5 edges --> 8 edges (2^3)
And since padding is accomplished using `jraph.pad_with_graphs`, an extra
  graph and node are added:
8 nodes --> 9 nodes
3 graphs --> 4 graphs
Args:
graphs_tuple: a batched `GraphsTuple` (can be batch size 1).
Returns:
    A graphs_tuple padded to the nearest power of two.
"""
# Add 1 since we need at least one padding node for pad_with_graphs.
pad_nodes_to = _nearest_bigger_power_of_two(jnp.sum(graphs_tuple.n_node)) + 1
pad_edges_to = _nearest_bigger_power_of_two(jnp.sum(graphs_tuple.n_edge))
# Add 1 since we need at least one padding graph for pad_with_graphs.
# We do not pad to nearest power of two because the batch size is fixed.
pad_graphs_to = graphs_tuple.n_node.shape[0] + 1
return jraph.pad_with_graphs(graphs_tuple, pad_nodes_to, pad_edges_to,
pad_graphs_to)
def compute_loss(params, graph, label, net):
"""Computes loss."""
pred_graph = net.apply(params, graph)
preds = jax.nn.log_softmax(pred_graph.globals)
targets = jax.nn.one_hot(label, 2)
# Since we have an extra 'dummy' graph in our batch due to padding, we want
# to mask out any loss associated with the dummy graph.
# Since we padded with `pad_with_graphs` we can recover the mask by using
# get_graph_padding_mask.
mask = jraph.get_graph_padding_mask(pred_graph)
# Cross entropy loss.
loss = -jnp.mean(preds * targets * mask[:, None])
# Accuracy taking into account the mask.
accuracy = jnp.sum(
(jnp.argmax(pred_graph.globals, axis=1) == label) * mask)/jnp.sum(mask)
return loss, accuracy
def train_step(optimizer, graph, label, net):
partial_loss_fn = functools.partial(
compute_loss, graph=graph, label=label, net=net)
grad_fn = jax.value_and_grad(partial_loss_fn, has_aux=True)
(loss, accuracy), grad = grad_fn(optimizer.target)
optimizer = optimizer.apply_gradient(grad)
return optimizer, {'loss': loss, 'accuracy': accuracy}
def train(data_path, master_csv_path, split_path, batch_size,
num_training_steps, save_dir):
"""OGB Training Script."""
# Initialize the dataset reader.
reader = data_utils.DataReader(
data_path=data_path,
master_csv_path=master_csv_path,
split_path=split_path,
batch_size=batch_size)
# Repeat the dataset forever for training.
reader.repeat()
net = GraphNetwork(mlp_features=(128, 128), latent_size=128)
# Get a candidate graph and label to initialize the network.
graph = reader.get_graph_by_idx(0)
# Initialize the network.
logging.info('Initializing network.')
params = net.init(jax.random.PRNGKey(42), graph)
optimizer = optim.Adam(learning_rate=1e-4).create(params)
optimizer = jax.device_put(optimizer)
for idx in range(num_training_steps):
graph = next(reader)
# Jax will re-jit your graphnet every time a new graph shape is encountered.
# In the limit, this means a new compilation every training step, which
# will result in *extremely* slow training. To prevent this, pad each
# batch of graphs to the nearest power of two. Since jax maintains a cache
# of compiled programs, the compilation cost is amortized.
graph = pad_graph_to_nearest_power_of_two(graph)
    # Remove the label from the input graph.
label = graph.globals['label']
graph = graph._replace(globals={})
optimizer, scalars = train_step(optimizer, graph, label, net)
if idx % 100 == 0:
logging.info('step: %s, loss: %s, acc: %s', idx, scalars['loss'],
scalars['accuracy'])
if save_dir is not None:
with pathlib.Path(save_dir, 'molhiv.pkl').open('wb') as fp:
logging.info('Saving model to %s', save_dir)
pickle.dump(optimizer.target, fp)
logging.info('Training finished')
def evaluate(data_path, master_csv_path, split_path, save_dir):
"""Evaluation Script."""
  logging.info('Evaluating OGB molhiv')
logging.info('Dataset split: %s', split_path)
# Initialize the dataset reader.
reader = data_utils.DataReader(
data_path=data_path,
master_csv_path=master_csv_path,
split_path=split_path,
batch_size=1)
with pathlib.Path(save_dir, 'molhiv.pkl').open('rb') as fp:
params = pickle.load(fp)
accumulated_loss = 0
accumulated_accuracy = 0
idx = 0
# We jit the computation of our loss, since this is the main computation.
# Using jax.jit means that we will use a single accelerator. If you want
# to use more than 1 accelerator, use jax.pmap. More information can be
# found in the jax documentation.
  net = GraphNetwork(mlp_features=(128, 128), latent_size=128)  # tuple, as in train()
compute_loss_fn = jax.jit(functools.partial(compute_loss, net=net))
for graph in reader:
# Jax will re-jit your graphnet every time a new graph shape is encountered.
# In the limit, this means a new compilation every training step, which
# will result in *extremely* slow training. To prevent this, pad each
# batch of graphs to the nearest power of two. Since jax maintains a cache
# of compiled programs, the compilation cost is amortized.
graph = pad_graph_to_nearest_power_of_two(graph)
# Extract the labels and remove from the graph.
label = graph.globals['label']
graph = graph._replace(globals={})
loss, acc = compute_loss_fn(params, graph, label)
accumulated_accuracy += acc
accumulated_loss += loss
idx += 1
if idx % 100 == 0:
logging.info('Evaluated %s graphs', idx)
logging.info('Completed evaluation.')
loss = accumulated_loss / idx
accuracy = accumulated_accuracy / idx
logging.info('Eval loss: %s, accuracy %s', loss, accuracy)
return loss, accuracy
def main(_):
if FLAGS.mode == 'train':
train(FLAGS.data_path, FLAGS.master_csv_path, FLAGS.split_path,
FLAGS.batch_size, FLAGS.num_training_steps, FLAGS.save_dir)
elif FLAGS.mode == 'evaluate':
evaluate(FLAGS.data_path, FLAGS.master_csv_path, FLAGS.split_path,
FLAGS.save_dir)
if __name__ == '__main__':
app.run(main)
| jraph-master | jraph/ogb_examples/train_flax.py |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Example training script for training OGB molhiv with jax graph-nets.
The ogbg-molhiv dataset is a molecular property prediction dataset.
It is adopted from the MoleculeNet [1]. All the molecules are pre-processed
using RDKit [2].
Each graph represents a molecule, where nodes are atoms, and edges are chemical
bonds. Input node features are 9-dimensional, containing atomic number and
chirality, as well as other additional atom features such as formal charge and
whether the atom is in the ring or not.
The goal is to predict whether a molecule inhibits HIV virus replication or not.
Performance is measured in ROC-AUC.
This script uses a GraphNet to learn the prediction task.
[1] Zhenqin Wu, Bharath Ramsundar, Evan N Feinberg, Joseph Gomes,
Caleb Geniesse, Aneesh S. Pappu, Karl Leswing, and Vijay Pande.
Moleculenet: a benchmark for molecular machine learning.
Chemical Science, 9(2):513–530, 2018.
[2] Greg Landrum et al. RDKit: Open-source cheminformatics, 2006.
Example usage:
python3 train.py --data_path={DATA_PATH} --master_csv_path={MASTER_CSV_PATH} \
--save_dir={SAVE_DIR} --split_path={SPLIT_PATH}
"""
import functools
import logging
import pathlib
import pickle
from typing import Iterator
from absl import app
from absl import flags
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
from jraph.ogb_examples import data_utils
import optax
flags.DEFINE_string('data_path', None, 'Directory of the data.')
flags.DEFINE_string('split_path', None, 'Path to the data split indices.')
flags.DEFINE_string('master_csv_path', None, 'Path to OGB master.csv.')
flags.DEFINE_string('save_dir', None, 'Directory to save parameters to.')
flags.DEFINE_integer('batch_size', 1, 'Number of graphs in batch.')
flags.DEFINE_integer('num_training_steps', 1000, 'Number of training steps.')
flags.DEFINE_enum('mode', 'train', ['train', 'evaluate'], 'Train or evaluate.')
FLAGS = flags.FLAGS
@jraph.concatenated_args
def edge_update_fn(feats: jnp.ndarray) -> jnp.ndarray:
"""Edge update function for graph net."""
net = hk.Sequential(
[hk.Linear(128), jax.nn.relu,
hk.Linear(128)])
return net(feats)
@jraph.concatenated_args
def node_update_fn(feats: jnp.ndarray) -> jnp.ndarray:
"""Node update function for graph net."""
net = hk.Sequential(
[hk.Linear(128), jax.nn.relu,
hk.Linear(128)])
return net(feats)
@jraph.concatenated_args
def update_global_fn(feats: jnp.ndarray) -> jnp.ndarray:
"""Global update function for graph net."""
  # Molhiv is a binary classification task, so output positive and negative logits.
net = hk.Sequential(
[hk.Linear(128), jax.nn.relu,
hk.Linear(2)])
return net(feats)
def net_fn(graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Graph net function."""
  # Add a global parameter for graph classification.
graph = graph._replace(globals=jnp.zeros([graph.n_node.shape[0], 1]))
embedder = jraph.GraphMapFeatures(
hk.Linear(128), hk.Linear(128), hk.Linear(128))
net = jraph.GraphNetwork(
update_node_fn=node_update_fn,
update_edge_fn=edge_update_fn,
update_global_fn=update_global_fn)
return net(embedder(graph))
def device_batch(
graph_generator: data_utils.DataReader) -> Iterator[jraph.GraphsTuple]:
"""Batches a set of graphs the size of the number of devices."""
num_devices = jax.local_device_count()
batch = []
for idx, graph in enumerate(graph_generator):
if idx % num_devices == num_devices - 1:
batch.append(graph)
yield jax.tree_map(lambda *x: jnp.stack(x, axis=0), *batch)
batch = []
else:
batch.append(graph)
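# Note (added for clarity): `device_batch` groups `jax.local_device_count()`
# consecutive GraphsTuples and stacks them along a new leading device axis, as
# expected by `jax.pmap`. Trailing graphs that do not fill a complete device
# batch are silently dropped.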
def compute_loss(params, graph, label, net):
"""Computes loss."""
pred_graph = net.apply(params, graph)
preds = jax.nn.log_softmax(pred_graph.globals)
targets = jax.nn.one_hot(label, 2)
# Since we have an extra 'dummy' graph in our batch due to padding, we want
# to mask out any loss associated with the dummy graph.
# Since we padded with `pad_with_graphs` we can recover the mask by using
# get_graph_padding_mask.
mask = jraph.get_graph_padding_mask(pred_graph)
# Cross entropy loss.
loss = -jnp.mean(preds * targets * mask[:, None])
# Accuracy taking into account the mask.
accuracy = jnp.sum(
(jnp.argmax(pred_graph.globals, axis=1) == label) * mask)/jnp.sum(mask)
return loss, accuracy
def train(data_path, master_csv_path, split_path, batch_size,
num_training_steps, save_dir):
"""OGB Training Script."""
# Initialize the dataset reader.
reader = data_utils.DataReader(
data_path=data_path,
master_csv_path=master_csv_path,
split_path=split_path,
batch_size=batch_size,
dynamically_batch=True)
# Repeat the dataset forever for training.
reader.repeat()
# Transform impure `net_fn` to pure functions with hk.transform.
net = hk.without_apply_rng(hk.transform(net_fn))
# Get a candidate graph and label to initialize the network.
graph = reader.get_graph_by_idx(0)
# Initialize the network.
logging.info('Initializing network.')
params = net.init(jax.random.PRNGKey(42), graph)
# Because we are training with multiple devices, params needs to have a
# device axis.
params = jax.device_put_replicated(params, list(jax.devices()))
# Initialize the optimizer.
opt_init, opt_update = optax.adam(1e-4)
opt_state = jax.pmap(opt_init)(params)
compute_loss_fn = functools.partial(compute_loss, net=net)
# We pmap the computation of our loss, since this is the main computation.
# Using jax.pmap means that we will use all available accelerators.
# More information can be found in the jax documentation.
@functools.partial(jax.pmap, axis_name='device')
def update_fn(params, graph, label, opt_state):
(loss, acc), grad = jax.value_and_grad(
compute_loss_fn, has_aux=True)(params, graph, label)
# Average gradients across devices
grad = jax.lax.pmean(grad, axis_name='device')
updates, opt_state = opt_update(grad, opt_state, params)
params = optax.apply_updates(params, updates)
return loss, acc, opt_state, params
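  # Note (added for clarity): `jax.lax.pmean` averages the per-device gradients
  # across the 'device' axis, so every replica applies the same update and the
  # replicated parameters stay in sync.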
for idx in range(num_training_steps):
graph_batch = next(device_batch(reader))
label = graph_batch.globals['label']
loss, acc, opt_state, params = update_fn(
params, graph_batch, label, opt_state)
if idx % 100 == 0:
logging.info('step: %s, loss: %s, acc: %s', idx, loss, acc)
if save_dir is not None:
with pathlib.Path(save_dir, 'molhiv.pkl').open('wb') as fp:
logging.info('Saving model to %s', save_dir)
pickle.dump(params, fp)
logging.info('Training finished')
def evaluate(data_path, master_csv_path, split_path, save_dir):
"""Evaluation Script."""
  logging.info('Evaluating OGB molhiv')
logging.info('Dataset split: %s', split_path)
# Initialize the dataset reader.
reader = data_utils.DataReader(
data_path=data_path,
master_csv_path=master_csv_path,
split_path=split_path,
batch_size=1,
dynamically_batch=True)
# Transform impure `net_fn` to pure functions with hk.transform.
net = hk.without_apply_rng(hk.transform(net_fn))
with pathlib.Path(save_dir, 'molhiv.pkl').open('rb') as fp:
params = pickle.load(fp)
accumulated_loss = 0
accumulated_accuracy = 0
idx = 0
# We pmap the computation of our loss, since this is the main computation.
compute_loss_fn = jax.pmap(functools.partial(compute_loss, net=net))
for graph_batch in device_batch(reader):
label = graph_batch.globals['label']
loss, acc = compute_loss_fn(params, graph_batch, label)
accumulated_accuracy += jnp.sum(acc)
accumulated_loss += jnp.sum(loss)
total_num_padding_graphs = jnp.sum(
jax.vmap(jraph.get_number_of_padding_with_graphs_graphs)(graph_batch))
idx += graph_batch.n_node.size - total_num_padding_graphs
if idx % 100 == 0:
logging.info('Evaluated %s graphs', idx)
logging.info('Completed evaluation.')
loss = accumulated_loss / idx
accuracy = accumulated_accuracy / idx
logging.info('Eval loss: %s, accuracy %s', loss, accuracy)
return loss, accuracy
def main(_):
if FLAGS.mode == 'train':
train(FLAGS.data_path, FLAGS.master_csv_path, FLAGS.split_path,
FLAGS.batch_size, FLAGS.num_training_steps, FLAGS.save_dir)
elif FLAGS.mode == 'evaluate':
evaluate(FLAGS.data_path, FLAGS.master_csv_path, FLAGS.split_path,
FLAGS.save_dir)
if __name__ == '__main__':
app.run(main)
| jraph-master | jraph/ogb_examples/train_pmap.py |
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for PGMax package."""
import os
import setuptools
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_version():
with open('pgmax/__init__.py') as fp:
for line in fp:
if line.startswith('__version__'):
g = {}
exec(line, g) # pylint: disable=exec-used
return g['__version__']
raise ValueError('`__version__` not defined in `pgmax/__init__.py`')
def _parse_requirements(path):
with open(os.path.join(_CURRENT_DIR, path)) as f:
return [
line.rstrip()
for line in f
if not (line.isspace() or line.startswith('#'))
]
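# Illustrative note (not part of the original setup script): a requirements
# line such as 'numpy>=1.20' would be kept verbatim, while blank lines and
# lines starting with '#' are skipped.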
VERSION = _get_version()
if __name__ == '__main__':
setuptools.setup(
name='pgmax',
version=VERSION,
packages=setuptools.find_packages(),
license='Apache 2.0',
author='DeepMind',
description=(
'Loopy belief propagation for factor graphs on discrete variables'
' in JAX'
),
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author_email='[email protected]',
install_requires=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements.txt')
),
      python_requires='>=3.7,<3.11',
)
| PGMax-main | setup.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test inference in two models with hardcoded set of messages."""
import jax.numpy as jnp
import numpy as np
from pgmax import fgraph
from pgmax import fgroup
from pgmax import infer
from pgmax import vgroup
def test_energy_empty_vgroup():
"""Compute energy with an empty vgroup."""
variables = vgroup.NDVarArray(num_states=2, shape=(2, 2))
variables_empty = vgroup.NDVarArray(num_states=2, shape=(0, 2))
fg = fgraph.FactorGraph(variable_groups=[variables, variables_empty])
factor_group = fgroup.EnumFactorGroup(
variables_for_factors=[[variables[0, 0], variables[0, 1]]],
factor_configs=np.zeros((1, 2), int),
)
fg.add_factors(factor_group)
bp = infer.build_inferer(fg.bp_state, backend="bp")
bp_arrays = bp.init()
beliefs = bp.get_beliefs(bp_arrays)
map_states = infer.decode_map_states(beliefs)
init_energy = infer.compute_energy(fg.bp_state, bp_arrays, map_states)[0]
assert init_energy == 0
init_energy2, _, _ = infer.compute_energy(
fg.bp_state, bp_arrays, map_states, debug_mode=True
)
assert init_energy == init_energy2
def test_energy_single_state():
"""Compute energy with a single state."""
variables = vgroup.NDVarArray(num_states=2, shape=(2, 2))
variables_single_state = vgroup.NDVarArray(num_states=1, shape=(1, 2))
fg = fgraph.FactorGraph(variable_groups=[variables, variables_single_state])
factor_group = fgroup.EnumFactorGroup(
variables_for_factors=[[variables[0, 0], variables[0, 1]]],
factor_configs=np.zeros((1, 2), int),
)
fg.add_factors(factor_group)
bp = infer.build_inferer(fg.bp_state, backend="bp")
bp_arrays = bp.init()
beliefs = bp.get_beliefs(bp_arrays)
assert beliefs[variables_single_state].shape == (1, 2, 1)
map_states = infer.decode_map_states(beliefs)
init_energy = infer.compute_energy(fg.bp_state, bp_arrays, map_states)[0]
assert init_energy == 0
init_energy2, _, _ = infer.compute_energy(
fg.bp_state, bp_arrays, map_states, debug_mode=True
)
assert init_energy == init_energy2
def test_energy_all_infinite_but_one_log_potentials():
"""Compute energy with all but one log potentials are infinite."""
variables = vgroup.NDVarArray(num_states=2, shape=(2,))
fg = fgraph.FactorGraph(variable_groups=[variables])
factor_group = fgroup.PairwiseFactorGroup(
variables_for_factors=[[variables[0], variables[1]]],
log_potential_matrix=np.array([[-np.inf, -np.inf], [-np.inf, 0]]),
)
fg.add_factors(factor_group)
bp = infer.build_inferer(fg.bp_state, backend="bp")
bp_arrays = bp.init()
bp_arrays = bp.run(bp_arrays, num_iters=1, temperature=0)
beliefs = bp.get_beliefs(bp_arrays)
assert beliefs[variables].shape == (2, 2)
map_states = infer.decode_map_states(beliefs)
assert np.all(map_states[variables] == np.array([1, 1]))
init_energy = infer.compute_energy(fg.bp_state, bp_arrays, map_states)[0]
assert init_energy == 0
init_energy2, _, _ = infer.compute_energy(
fg.bp_state, bp_arrays, map_states, debug_mode=True
)
assert init_energy == init_energy2
def test_energy_all_infinite_log_potentials():
"""Compute energy with all infinite log potentials."""
variables = vgroup.NDVarArray(num_states=2, shape=(2,))
fg = fgraph.FactorGraph(variable_groups=[variables])
factor_group = fgroup.PairwiseFactorGroup(
variables_for_factors=[[variables[0], variables[1]]],
log_potential_matrix=jnp.array([[-np.inf, -np.inf], [-np.inf, -np.inf]]),
)
fg.add_factors(factor_group)
bp = infer.build_inferer(fg.bp_state, backend="bp")
bp_arrays = bp.init()
bp_arrays = bp.run(bp_arrays, num_iters=1, temperature=0)
beliefs = bp.get_beliefs(bp_arrays)
assert beliefs[variables].shape == (2, 2)
map_states = infer.decode_map_states(beliefs)
init_energy = infer.compute_energy(fg.bp_state, bp_arrays, map_states)[0]
assert init_energy == np.inf
init_energy2, _, _ = infer.compute_energy(
fg.bp_state, bp_arrays, map_states, debug_mode=True
)
assert init_energy == init_energy2
| PGMax-main | tests/test_energy.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name
"""Test inference in two models with hardcoded set of messages."""
from typing import Any, Dict, List, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from numpy.random import default_rng
from pgmax import factor
from pgmax import fgraph
from pgmax import fgroup
from pgmax import infer
from pgmax import vgroup
from scipy.ndimage import gaussian_filter
# Set random seed for rng
rng = default_rng(23)
def test_e2e_sanity_check():
"""Test running BP in the a cut model applied on a synthetic depth scene."""
def create_valid_suppression_config_arr(
suppression_diameter: int,
) -> np.ndarray:
"""Helper function to easily generate a list of valid configurations for a given suppression diameter.
Args:
suppression_diameter: Suppression diameter
Returns:
An array with valid suppressions.
"""
valid_suppressions_list = []
base_list = [0] * suppression_diameter
valid_suppressions_list.append(base_list)
for idx in range(suppression_diameter):
new_valid_list1 = base_list[:]
new_valid_list2 = base_list[:]
new_valid_list1[idx] = 1
new_valid_list2[idx] = 2
valid_suppressions_list.append(new_valid_list1)
valid_suppressions_list.append(new_valid_list2)
ret_arr = np.array(valid_suppressions_list)
ret_arr.flags.writeable = False
return ret_arr
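  # For a suppression diameter of 2, the helper above yields (illustrative):
  #   [[0, 0], [1, 0], [2, 0], [0, 1], [0, 2]]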
true_final_msgs_output = jax.device_put(
jnp.array(
[
0.0000000e00,
-2.9802322e-08,
0.0000000e00,
0.0000000e00,
0.0000000e00,
-6.2903470e-01,
0.0000000e00,
-3.0177221e-01,
-3.0177209e-01,
-2.8220856e-01,
0.0000000e00,
-6.0928625e-01,
-1.2259892e-01,
-5.7227510e-01,
0.0000000e00,
-1.2259889e-01,
0.0000000e00,
-7.0657951e-01,
0.0000000e00,
-5.8416575e-01,
-4.4967628e-01,
-1.2259889e-01,
-4.4967628e-01,
0.0000000e00,
0.0000000e00,
-5.8398044e-01,
-1.1587733e-01,
-1.8589476e-01,
0.0000000e00,
-3.0177209e-01,
0.0000000e00,
-1.1920929e-07,
-2.9802322e-08,
0.0000000e00,
0.0000000e00,
-7.2534859e-01,
-2.1119976e-01,
-1.1224430e00,
0.0000000e00,
-2.9802322e-08,
-2.9802322e-08,
0.0000000e00,
-1.1587762e-01,
-4.8865837e-01,
0.0000000e00,
0.0000000e00,
0.0000000e00,
-2.9802322e-08,
0.0000000e00,
-1.7563977e00,
-1.7563977e00,
0.0000000e00,
-2.0581698e00,
-2.0581698e00,
0.0000000e00,
-2.1994662e00,
-2.1994662e00,
0.0000000e00,
-1.6154857e00,
-1.6154857e00,
0.0000000e00,
-2.0535989e00,
-2.0535989e00,
0.0000000e00,
-1.7265215e00,
-1.7265215e00,
0.0000000e00,
-1.7238994e00,
-1.7238994e00,
0.0000000e00,
-2.0509768e00,
-2.0509768e00,
0.0000000e00,
-1.8303051e00,
-1.8303051e00,
0.0000000e00,
-2.7415483e00,
-2.7415483e00,
0.0000000e00,
-2.0552459e00,
-2.0552459e00,
0.0000000e00,
-2.1711233e00,
-2.1711233e00,
]
)
)
# Create a synthetic depth image for testing purposes
im_size = 3
depth_img = 5.0 * np.ones((im_size, im_size))
depth_img[
np.tril_indices(im_size, 0)
] = 1.0 # This sets the lower triangle of the image to 1's
depth_img = gaussian_filter(
depth_img, sigma=0.5
  )  # Filter the depth image for realistic noise simulation.
labels_img = np.zeros((im_size, im_size), dtype=np.int32)
labels_img[np.tril_indices(im_size, 0)] = 1
M: int = depth_img.shape[0]
N: int = depth_img.shape[1]
# Compute dI/dx (horizontal derivative)
horizontal_depth_differences = depth_img[:-1] - depth_img[1:]
# Compute dI/dy (vertical derivative)
vertical_depth_differences = depth_img[:, :-1] - depth_img[:, 1:]
# The below code block assigns values for the orientation of every
# horizontally-oriented cut
# It creates a matrix to represent each of the horizontal cuts in the image
# graph (there will be (M-1, N) possible cuts)
# Assigning a cut of 1 points to the left and a cut of 2 points to the right.
# A cut of 0 indicates an OFF state
horizontal_oriented_cuts = np.zeros((M - 1, N))
horizontal_oriented_cuts[horizontal_depth_differences < 0] = 1
horizontal_oriented_cuts[horizontal_depth_differences > 0] = 2
# The below code block assigns values for the orientation of every
# vertically-oriented cut
# It creates a matrix to represent each of the vertical cuts in the image
  # graph (there will be (M, N-1) possible cuts)
# Assigning a cut of 1 points up and a cut of 2 points down.
# A cut of 0 indicates an OFF state
vertical_oriented_cuts = np.zeros((M, N - 1))
vertical_oriented_cuts[vertical_depth_differences < 0] = 1
vertical_oriented_cuts[vertical_depth_differences > 0] = 2
gt_has_cuts = np.zeros((2, M, N))
gt_has_cuts[0, :-1] = horizontal_oriented_cuts
gt_has_cuts[1, :, :-1] = vertical_oriented_cuts
# Before constructing a Factor Graph, we specify all the valid configurations
# for the non-suppression factors
valid_configs_non_supp = np.array(
[
[0, 0, 0, 0],
[1, 0, 1, 0],
[2, 0, 2, 0],
[0, 0, 1, 1],
[0, 0, 2, 2],
[2, 0, 0, 1],
[1, 0, 0, 2],
[1, 0, 1, 1],
[2, 0, 2, 1],
[1, 0, 1, 2],
[2, 0, 2, 2],
[0, 1, 0, 1],
[1, 1, 0, 0],
[0, 1, 2, 0],
[1, 1, 0, 1],
[2, 1, 0, 1],
[0, 1, 1, 1],
[0, 1, 2, 1],
[1, 1, 1, 0],
[2, 1, 2, 0],
[0, 2, 0, 2],
[2, 2, 0, 0],
[0, 2, 1, 0],
[2, 2, 0, 2],
[1, 2, 0, 2],
[0, 2, 2, 2],
[0, 2, 1, 2],
[2, 2, 2, 0],
[1, 2, 1, 0],
]
)
valid_configs_non_supp.flags.writeable = False
# Now, we specify the valid configurations for all the suppression factors
SUPPRESSION_DIAMETER = 2
valid_configs_supp = create_valid_suppression_config_arr(SUPPRESSION_DIAMETER)
# We create a NDVarArray such that the [0,i,j] entry corresponds to the
# vertical cut variable (i.e, the one attached horizontally to the factor)
# at that location in the image, and the [1,i,j] entry corresponds to
# the horizontal cut variable (i.e, the one attached vertically to the factor)
# at that location.
grid_vars = vgroup.NDVarArray(shape=(2, M - 1, N - 1), num_states=3)
# Make a group of additional variables for the edges of the grid
extra_row_names: List[Tuple[Any, ...]] = [
(0, row, N - 1) for row in range(M - 1)
]
extra_col_names: List[Tuple[Any, ...]] = [
(1, M - 1, col) for col in range(N - 1)
]
additional_names = tuple(extra_row_names + extra_col_names)
additional_vars = vgroup.VarDict(
variable_names=additional_names, num_states=3
)
true_map_state_output = {
(grid_vars, (0, 0, 0)): 2,
(grid_vars, (0, 0, 1)): 0,
(grid_vars, (0, 1, 0)): 0,
(grid_vars, (0, 1, 1)): 2,
(grid_vars, (1, 0, 0)): 1,
(grid_vars, (1, 0, 1)): 0,
(grid_vars, (1, 1, 0)): 1,
(grid_vars, (1, 1, 1)): 0,
(additional_vars, (0, 0, 2)): 0,
(additional_vars, (0, 1, 2)): 2,
(additional_vars, (1, 2, 0)): 1,
(additional_vars, (1, 2, 1)): 0,
}
gt_has_cuts = gt_has_cuts.astype(np.int32)
# We use this array along with the gt_has_cuts array computed earlier
# in order to derive the evidence values
grid_evidence_arr = np.zeros((2, M - 1, N - 1, 3), dtype=float)
additional_vars_evidence_dict: Dict[Tuple[int, ...], np.ndarray] = {}
for i in range(2):
for row in range(M):
for col in range(N):
        # The dictionary key is in composite_grid_group at loc [i,row,col]
# We know num states for each variable is 3
evidence_vals_arr = np.zeros(3)
# We assign belief value 2.0 to the correct index in the evidence vector
evidence_vals_arr[gt_has_cuts[i, row, col]] = 2.0
# This normalizes the evidence by subtracting the 0th index value
evidence_vals_arr = evidence_vals_arr - evidence_vals_arr[0]
# This adds logistic noise for every evidence entry
evidence_vals_arr[1:] += 0.1 * rng.logistic(
size=evidence_vals_arr[1:].shape
)
try:
_ = grid_vars[i, row, col]
grid_evidence_arr[i, row, col] = evidence_vals_arr
except IndexError:
try:
_ = additional_vars[i, row, col]
additional_vars_evidence_dict[i, row, col] = evidence_vals_arr
except ValueError:
pass
# Create the factor graph
fg = fgraph.FactorGraph(variable_groups=[grid_vars, additional_vars])
# Imperatively add EnumFactorGroups (each consisting of just one EnumFactor)
# to the graph!
for row in range(M - 1):
for col in range(N - 1):
if row != M - 2 and col != N - 2:
curr_vars = [
grid_vars[0, row, col],
grid_vars[1, row, col],
grid_vars[0, row, col + 1],
grid_vars[1, row + 1, col],
]
elif row != M - 2:
curr_vars = [
grid_vars[0, row, col],
grid_vars[1, row, col],
additional_vars[0, row, col + 1],
grid_vars[1, row + 1, col],
]
elif col != N - 2:
curr_vars = [
grid_vars[0, row, col],
grid_vars[1, row, col],
grid_vars[0, row, col + 1],
additional_vars[1, row + 1, col],
]
else:
curr_vars = [
grid_vars[0, row, col],
grid_vars[1, row, col],
additional_vars[0, row, col + 1],
additional_vars[1, row + 1, col],
]
if row % 2 == 0:
enum_factor = factor.EnumFactor(
variables=curr_vars,
factor_configs=valid_configs_non_supp,
log_potentials=np.zeros(
valid_configs_non_supp.shape[0], dtype=float
),
)
fg.add_factors(enum_factor)
else:
enum_factor = factor.EnumFactor(
variables=curr_vars,
factor_configs=valid_configs_non_supp,
log_potentials=np.zeros(
valid_configs_non_supp.shape[0], dtype=float
),
)
fg.add_factors(enum_factor)
# Create an EnumFactorGroup for vertical suppression factors
vert_suppression_vars: List[List[Tuple[Any, ...]]] = []
for col in range(N):
for start_row in range(M - SUPPRESSION_DIAMETER):
if col != N - 1:
vert_suppression_vars.append(
[
grid_vars[0, r, col]
for r in range(start_row, start_row + SUPPRESSION_DIAMETER)
]
)
else:
vert_suppression_vars.append(
[
additional_vars[0, r, col]
for r in range(start_row, start_row + SUPPRESSION_DIAMETER)
]
)
horz_suppression_vars: List[List[Tuple[Any, ...]]] = []
for row in range(M):
for start_col in range(N - SUPPRESSION_DIAMETER):
if row != M - 1:
horz_suppression_vars.append(
[
grid_vars[1, row, c]
for c in range(start_col, start_col + SUPPRESSION_DIAMETER)
]
)
else:
horz_suppression_vars.append(
[
additional_vars[1, row, c]
for c in range(start_col, start_col + SUPPRESSION_DIAMETER)
]
)
# Add the suppression factors to the graph via kwargs
factor_group = fgroup.EnumFactorGroup(
variables_for_factors=vert_suppression_vars,
factor_configs=valid_configs_supp,
)
fg.add_factors(factor_group)
factor_group = fgroup.EnumFactorGroup(
variables_for_factors=horz_suppression_vars,
factor_configs=valid_configs_supp,
log_potentials=np.zeros(valid_configs_supp.shape[0], dtype=float),
)
fg.add_factors(factor_group)
# Run BP
# Set the evidence
bp_state = fg.bp_state
assert np.all(
[
isinstance(jax.device_put(this_wiring), factor.Wiring)
for this_wiring in fg.fg_state.wiring.values()
]
)
bp_state.evidence[grid_vars] = grid_evidence_arr
bp_state.evidence[additional_vars] = additional_vars_evidence_dict
bp = infer.build_inferer(bp_state, backend="bp")
bp_arrays = bp.run(bp.init(), num_iters=100, temperature=0.0)
# Test that the output messages are close to the true messages
assert jnp.allclose(bp_arrays.ftov_msgs, true_final_msgs_output, atol=1e-06)
decoded_map_states = infer.decode_map_states(bp.get_beliefs(bp_arrays))
for name in true_map_state_output:
assert true_map_state_output[name] == decoded_map_states[name[0]][name[1]]
def test_e2e_heretic():
"""Test running BP in the "heretic" model."""
# Define some global constants
im_size = (30, 30)
# Instantiate all the Variables in the factor graph via VarGroups
pixel_vars = vgroup.NDVarArray(shape=im_size, num_states=3)
hidden_vars = vgroup.NDVarArray(
shape=(im_size[0] - 2, im_size[1] - 2), num_states=17
) # Each hidden var is connected to a 3x3 patch of pixel vars
bXn = np.zeros((30, 30, 3))
# Create the factor graph
fg = fgraph.FactorGraph([pixel_vars, hidden_vars])
def binary_connected_variables(
num_hidden_rows, num_hidden_cols, kernel_row, kernel_col
):
ret_list: List[List[Tuple[Any, ...]]] = []
for h_row in range(num_hidden_rows):
for h_col in range(num_hidden_cols):
ret_list.append(
[
hidden_vars[h_row, h_col],
pixel_vars[h_row + kernel_row, h_col + kernel_col],
]
)
return ret_list
W_pot = np.zeros((17, 3, 3, 3), dtype=float)
for k_row in range(3):
for k_col in range(3):
factor_group = fgroup.PairwiseFactorGroup(
variables_for_factors=binary_connected_variables(
28, 28, k_row, k_col
),
log_potential_matrix=W_pot[:, :, k_row, k_col],
)
fg.add_factors(factor_group)
# Assign evidence to pixel vars
bp_state = fg.bp_state
bp_state.evidence[pixel_vars] = np.array(bXn)
bp_state.evidence[pixel_vars[0, 0]] = np.array([0.0, 0.0, 0.0])
_ = bp_state.evidence[pixel_vars[0, 0]]
_ = bp_state.evidence[hidden_vars[0, 0]]
assert isinstance(bp_state.evidence.value, np.ndarray)
assert len(sum(fg.factors.values(), ())) == 7056
bp = infer.build_inferer(bp_state, backend="bp")
bp_arrays = bp.run(bp.init(), num_iters=1, temperature=1.0)
marginals = infer.get_marginals(bp.get_beliefs(bp_arrays))
assert jnp.allclose(jnp.sum(marginals[pixel_vars], axis=-1), 1.0)
| PGMax-main | tests/test_pgmax.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| PGMax-main | tests/__init__.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test clipping behavior doesn't explode."""
import numpy as np
from pgmax import fgraph
from pgmax import fgroup
from pgmax import infer
from pgmax import vgroup
def test_divergent_system_decodes_correctly():
"""Test that we can still sanely decode variables once we hit MSG_NEG_INF.
Fails before the MSG_NEG_INF and NEG_INF split as it decodes to all 0,
but it should be all 1.
  If NEG_INF is not sufficiently negative compared to MSG_NEG_INF, then when
  the messages from variables with state 0 to factors, M_INC, become lower than
  NEG_INF, the outgoing messages to state 0 will be equal to NEG_INF - M_INC,
  which is a very large value. However these messages should be equal to M_INC.
"""
pg_vars = vgroup.NDVarArray(num_states=2, shape=(10,))
variables_for_factors = []
for i in range(10):
for j in range(i + 1, 10):
variables_for_factors.append([pg_vars[i], pg_vars[j]])
f = fgroup.EnumFactorGroup(
variables_for_factors=variables_for_factors,
factor_configs=np.array([[0, 0], [1, 1]]),
)
fg = fgraph.FactorGraph(pg_vars)
fg.add_factors(f)
bp = infer.BP(fg.bp_state)
bp_arrays = bp.init(
evidence_updates={pg_vars: np.transpose([np.zeros(10), np.ones(10)])}
)
bp_arrays2, _ = bp.run_with_diffs(bp_arrays, 100, damping=0)
beliefs = bp.get_beliefs(bp_arrays2)[pg_vars]
decoded = infer.decode_map_states(beliefs)
assert np.all(decoded == 1)
| PGMax-main | tests/test_clipping.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test running inference with PGMax in some simple models."""
import numpy as np
from pgmax import fgraph
from pgmax import fgroup
from pgmax import infer
from pgmax import vgroup
def test_ising_model():
"""Runs inference in a small Ising model."""
variables = vgroup.NDVarArray(num_states=2, shape=(50, 50))
fg = fgraph.FactorGraph(variable_groups=variables)
variables_for_factors = []
for ii in range(50):
for jj in range(50):
kk = (ii + 1) % 50
ll = (jj + 1) % 50
variables_for_factors.append([variables[ii, jj], variables[kk, jj]])
variables_for_factors.append([variables[ii, jj], variables[ii, ll]])
factor_group = fgroup.PairwiseFactorGroup(
variables_for_factors=variables_for_factors,
log_potential_matrix=0.8 * np.array([[1.0, -1.0], [-1.0, 1.0]]),
)
fg.add_factors(factor_group)
# Run inference
bp = infer.build_inferer(fg.bp_state, backend="bp")
bp_arrays = bp.init(
evidence_updates={variables: np.random.gumbel(size=(50, 50, 2))}
)
# Get the initial energy
beliefs = bp.get_beliefs(bp_arrays)
map_states = infer.decode_map_states(beliefs)
init_energy = infer.compute_energy(fg.bp_state, bp_arrays, map_states)[0]
# Run BP
bp_arrays = bp.run(bp_arrays, num_iters=3000, temperature=0)
beliefs = bp.get_beliefs(bp_arrays)
map_states = infer.decode_map_states(beliefs)
assert map_states[variables].shape == (50, 50)
# Compute the energy of the decoding with and without debug mode
decoding_energy = infer.compute_energy(fg.bp_state, bp_arrays, map_states)[0]
decoding_energy_debug = infer.compute_energy(
fg.bp_state, bp_arrays, map_states, debug_mode=True
)[0]
assert np.allclose(decoding_energy, decoding_energy_debug, atol=5e-6)
assert decoding_energy <= init_energy - 1500
| PGMax-main | tests/test_examples.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the primal LP-MAP solver."""
import numpy as np
from pgmax import fgraph
from pgmax import fgroup
from pgmax import infer
from pgmax import vgroup
from pgmax.utils import primal_lp
def test_lp_primal_ising(grid_size=4, n_iters=5):
"""Test the primal LP-MAP solver on a fully connected Ising model."""
np.random.seed(0)
gt_visible = np.array([
[
0.0,
1.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
1.0,
0.0,
],
[
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
],
[
0.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
0.0,
1.0,
],
[
1.0,
1.0,
1.0,
0.0,
0.0,
1.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
],
[
1.0,
1.0,
0.0,
0.0,
0.0,
1.0,
1.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
],
])
gt_objvals = np.array(
[14.64071274, 21.893808, 25.80109529, 20.0094238, 16.02391594]
)
# Create a fully connected Ising model
variables = vgroup.NDVarArray(num_states=2, shape=(grid_size, grid_size))
fg = fgraph.FactorGraph(variable_groups=variables)
variables_for_factors = []
for ii in range(grid_size):
for jj in range(grid_size):
for kk in range(ii + 1, grid_size):
for ll in range(jj + 1, grid_size):
variables_for_factors.append([variables[ii, jj], variables[kk, ll]])
factor_group = fgroup.PairwiseFactorGroup(
variables_for_factors=variables_for_factors,
)
fg.add_factors(factor_group)
for it in range(n_iters):
# Evidence array
evidence_updates = {
variables: np.random.gumbel(size=(grid_size, grid_size, 2))
}
# Solve with cvxpy
cvxpy_lp_vgroups_solution, cvxpy_lp_objval = primal_lp.primal_lp_solver(
fg, evidence_updates
)
cvxpy_map_states = infer.decode_map_states(cvxpy_lp_vgroups_solution)
assert np.allclose(
cvxpy_map_states[variables].flatten(), gt_visible[it], atol=1e-6
)
assert np.allclose(cvxpy_lp_objval, gt_objvals[it], atol=1e-6)
def test_lp_primal_rbm(n_hidden=8, n_visible=12, n_iters=5):
"""Test the primal LP-MAP solver on a RBM."""
np.random.seed(0)
gt_hidden = np.array([
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0],
])
gt_visible = np.array([
[1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0],
])
gt_objvals = np.array(
[31.16162399, 26.15338596, 27.35837594, 23.34839416, 32.82615209]
)
  # Rescaling helps make the LP relaxation tight
bh = 0.1 * np.random.normal(size=(n_hidden,))
bv = 0.1 * np.random.normal(size=(n_visible,))
# pylint: disable=invalid-name
W = 0.1 * np.random.normal(size=(n_hidden, n_visible))
# Initialize factor graph
hidden_variables = vgroup.NDVarArray(num_states=2, shape=bh.shape)
visible_variables = vgroup.NDVarArray(num_states=2, shape=bv.shape)
fg = fgraph.FactorGraph(variable_groups=[hidden_variables, visible_variables])
# Create unary factors
hidden_unaries = fgroup.EnumFactorGroup(
variables_for_factors=[
[hidden_variables[ii]] for ii in range(bh.shape[0])
],
factor_configs=np.arange(2)[:, None],
log_potentials=np.stack([np.zeros_like(bh), bh], axis=1),
)
visible_unaries = fgroup.EnumFactorGroup(
variables_for_factors=[
[visible_variables[jj]] for jj in range(bv.shape[0])
],
factor_configs=np.arange(2)[:, None],
log_potentials=np.stack([np.zeros_like(bv), bv], axis=1),
)
# Create pairwise factors
log_potential_matrix = np.zeros(W.shape + (2, 2)).reshape((-1, 2, 2))
log_potential_matrix[:, 1, 1] = W.ravel()
# pylint: disable=g-complex-comprehension
variables_for_factors = [
[hidden_variables[ii], visible_variables[jj]]
for ii in range(bh.shape[0])
for jj in range(bv.shape[0])
]
pairwise_factors = fgroup.PairwiseFactorGroup(
variables_for_factors=variables_for_factors,
log_potential_matrix=log_potential_matrix,
)
# Add factors to the FactorGraph
fg.add_factors([hidden_unaries, visible_unaries, pairwise_factors])
# Evidence array
for it in range(n_iters):
evidence_updates = {
hidden_variables: np.random.gumbel(size=(bh.shape[0], 2)),
visible_variables: np.random.gumbel(size=(bv.shape[0], 2)),
}
# Solve with cvxpy
cvxpy_lp_vgroups_solution, cvxpy_lp_objval = primal_lp.primal_lp_solver(
fg, evidence_updates
)
cvxpy_map_states = infer.decode_map_states(cvxpy_lp_vgroups_solution)
cvxpy_visible = cvxpy_map_states[visible_variables].flatten()
cvxpy_hidden = cvxpy_map_states[hidden_variables].flatten()
assert np.allclose(cvxpy_hidden, gt_hidden[it], atol=1e-6)
assert np.allclose(cvxpy_visible, gt_visible[it], atol=1e-6)
assert np.allclose(cvxpy_lp_objval, gt_objvals[it], atol=1e-6)
| PGMax-main | tests/lp/test_primal_lp.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the Smooth Dual LP-MAP solver for different factor types."""
import numpy as np
from pgmax import fgraph
from pgmax import fgroup
from pgmax import infer
from pgmax import vgroup
from pgmax.utils import primal_lp
import pytest
RTOL = 5e-3
def test_lp_primal_vs_dual_ising():
"""Test the LP-MAP primal and dual solvers on a fully connected Ising model with 3 states, when LP relaxation is tight.
Both the accelerated gradient descent (with log sum-exp temperature > 0)
and subgradient descent (with log sum-exp temperature = 0) are tested.
At each iteration, compare the optimal objective value returned by the
primal solver, with the upper and lower bounds returned by the dual solver
"""
grid_size = 4
num_states = 3
n_iters = 10
# Create a fully connected categorical Ising model
variables = vgroup.NDVarArray(
num_states=num_states, shape=(grid_size, grid_size)
)
fg = fgraph.FactorGraph(variable_groups=variables)
variables_for_factors = []
for ii in range(grid_size):
for jj in range(grid_size):
for kk in range(ii + 1, grid_size):
for ll in range(jj + 1, grid_size):
variables_for_factors.append([variables[ii, jj], variables[kk, ll]])
factor_group = fgroup.PairwiseFactorGroup(
variables_for_factors=variables_for_factors,
log_potential_matrix=0.01 # rescaling makes the test more robust
* np.random.normal(
size=(len(variables_for_factors), num_states, num_states)
),
)
fg.add_factors(factor_group)
# Create the dual LP-MAP solver
sdlp = infer.build_inferer(fg.bp_state, backend="sdlp")
sdlp_arrays = sdlp.init()
with pytest.raises(
ValueError,
match=(
"The log sum-exp temperature of the Dual LP-MAP solver has to be"
" between 0.0 and 1.0"
),
):
sdlp_arrays = sdlp.run(
sdlp_arrays,
logsumexp_temp=1.01,
num_iters=1,
)
with pytest.raises(
ValueError,
match=(
"For gradient descent, the learning rate must be smaller than the"
" log sum-exp temperature."
),
):
sdlp_arrays = sdlp.run(
sdlp_arrays, logsumexp_temp=0.01, num_iters=1, lr=0.1
)
for it in range(n_iters):
np.random.seed(it)
if it % 2 == 0:
logsumexp_temp = 1e-3
else:
logsumexp_temp = 0.0
# Evidence updates
evidence_updates = {
variables: np.random.gumbel(size=(grid_size, grid_size, num_states))
}
# Solve the primal LP-MAP
_, cvxpy_lp_objval = primal_lp.primal_lp_solver(fg, evidence_updates)
# Solve the Smooth Dual LP-MAP
sdlp_arrays = sdlp.init(evidence_updates=evidence_updates)
sdlp_arrays = sdlp.run(
sdlp_arrays,
logsumexp_temp=logsumexp_temp,
lr=None,
num_iters=5_000,
)
sdlp_unaries_decoded, _ = sdlp.decode_primal_unaries(sdlp_arrays)
# Compare the upper and lower bounds of the primal problem (obtained
# from the dual LP-MAP solution) to the optimal objective value
primal_upper_bound = sdlp.get_primal_upper_bound(sdlp_arrays)
primal_lower_bound = sdlp.get_map_lower_bound(
sdlp_arrays, sdlp_unaries_decoded
)
assert np.isclose(cvxpy_lp_objval, primal_upper_bound, rtol=RTOL)
assert np.isclose(primal_lower_bound, primal_upper_bound, rtol=RTOL)
# Also test that both standard and debug modes return the same lower bound
primal_lower_bound_debug = sdlp.get_map_lower_bound(
sdlp_arrays, sdlp_unaries_decoded, debug_mode=True
)
assert np.isclose(primal_lower_bound, primal_lower_bound_debug)
# pylint: disable=invalid-name
def test_lp_primal_vs_dual_line_sparsification():
"""Test the LP-MAP primal and dual solvers to sparsify a line, when the LP relaxation is tight.
The FactorGraph uses ORFactors to sparsify a line: the line is represented
by the bottom variables, while the sparse representation is represented by
the top variables. Each bottom variable is active and can be explained
  by any one of its 3 closest top variables.
At each iteration, compare
(1) The decodings returned by both solvers
(2) The optimal objective value returned by the primal solver, with the upper
and lower bounds returned by the dual solver
"""
line_length = 20
n_iters = 10
# Create the FactorGraph
top_variables = vgroup.NDVarArray(num_states=2, shape=(line_length,))
bottom_variables = vgroup.NDVarArray(num_states=2, shape=(line_length,))
fg = fgraph.FactorGraph(variable_groups=[top_variables, bottom_variables])
# Add ORFactors to the graph
variables_for_OR_factors = []
for factor_idx in range(line_length):
variables_for_OR_factor = [top_variables[factor_idx]]
if factor_idx >= 1:
variables_for_OR_factor.append(top_variables[factor_idx - 1])
if factor_idx <= line_length - 2:
variables_for_OR_factor.append(top_variables[factor_idx + 1])
# Add child variable at the last position
variables_for_OR_factor.append(bottom_variables[factor_idx])
variables_for_OR_factors.append(variables_for_OR_factor)
factor_group = fgroup.ORFactorGroup(variables_for_OR_factors)
fg.add_factors(factor_group)
# Evidence update: bottom variables are all turned ON
bottom_variables_evidence = np.zeros((line_length, 2))
bottom_variables_evidence[..., 0] = -10_000
# Top variables are more likely to be turned OFF
top_variables_evidence = np.zeros((line_length, 2))
top_variables_evidence[..., 1] = -100
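  # With this evidence the optimal sparsification turns ON roughly one top
  # variable out of three: each active top variable can explain up to 3
  # consecutive bottom variables, so (line_length + 3) // 3 top variables
  # (7 for a line of length 20) suffice, which the decoding check below relies on.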
# Create the dual LP-MAP solver
sdlp = infer.build_inferer(fg.bp_state, backend="sdlp")
for it in range(n_iters):
np.random.seed(it)
logsumexp_temp = 1e-3
lr = None
# Add Gumbel noise to the updates
top_variables_add = np.random.gumbel(size=top_variables_evidence.shape)
evidence_updates = {
top_variables: top_variables_evidence + top_variables_add,
bottom_variables: bottom_variables_evidence,
}
# Solve the primal LP-MAP
cvxpy_lp_vgroups_solution, cvxpy_lp_objval = primal_lp.primal_lp_solver(
fg, evidence_updates
)
cvxpy_map_states = infer.decode_map_states(cvxpy_lp_vgroups_solution)
# Solve the smooth dual LP-MAP
sdlp_arrays = sdlp.init(evidence_updates=evidence_updates)
sdlp_arrays = sdlp.run(
sdlp_arrays,
logsumexp_temp=logsumexp_temp,
lr=lr,
num_iters=5_000,
)
sdlp_unaries_decoded, _ = sdlp.decode_primal_unaries(sdlp_arrays)
# First compare the unaries decoded from the dual to the optimal LP solution
assert np.allclose(
sdlp_unaries_decoded[top_variables], cvxpy_map_states[top_variables]
)
assert sdlp_unaries_decoded[top_variables].sum() == (line_length + 3) // 3
# Second compare the upper and lower bounds of the primal problem (obtained
# from the dual LP-MAP solution) to the optimal objective value
primal_upper_bound = sdlp.get_primal_upper_bound(sdlp_arrays)
primal_lower_bound = sdlp.get_map_lower_bound(
sdlp_arrays, sdlp_unaries_decoded
)
assert np.isclose(cvxpy_lp_objval, primal_upper_bound, rtol=RTOL)
assert np.isclose(primal_lower_bound, primal_upper_bound, rtol=RTOL)
# Also test that both standard and debug modes return the same lower bound
primal_lower_bound_debug = sdlp.get_map_lower_bound(
sdlp_arrays, sdlp_unaries_decoded, debug_mode=True
)
assert np.isclose(primal_lower_bound, primal_lower_bound_debug)
def test_lp_primal_vs_dual_and_factors():
"""Test the LP-MAP primal and dual solvers on a model with ANDFactors, when the LP relaxation is tight.
  The FactorGraph uses ANDFactors to find the rows of a matrix in which
  all the columns are filled with 1s.
At each iteration, compare
(1) The decodings returned by both solvers
(2) The optimal objective value returned by the primal solver, with the upper
and lower bounds returned by the dual solver
"""
num_rows = 10
num_cols = 5
p_on = 0.8
n_iters = 5
# Create the FactorGraph
matrix = vgroup.NDVarArray(num_states=2, shape=(num_rows, num_cols))
all_ones = vgroup.NDVarArray(num_states=2, shape=(num_rows,))
fg = fgraph.FactorGraph(variable_groups=[matrix, all_ones])
# Add ANDFactors to the graph
variables_for_AND_factors = []
for factor_idx in range(num_rows):
variables_for_AND_factor = matrix[factor_idx]
variables_for_AND_factor.append(all_ones[factor_idx])
variables_for_AND_factors.append(variables_for_AND_factor)
factor_group = fgroup.ANDFactorGroup(variables_for_AND_factors)
fg.add_factors(factor_group)
# Create the dual LP-MAP solver
sdlp = infer.build_inferer(fg.bp_state, backend="sdlp")
for it in range(n_iters):
np.random.seed(it)
logsumexp_temp = 1e-3
    # Evidence update: the matrix entries are strongly biased towards their observed values
matrix_obs = np.random.binomial(1, p_on, (num_rows, num_cols))
matrix_evidence = np.zeros((num_rows, num_cols, 2))
matrix_evidence[..., 1] = 1_000 * (2 * matrix_obs - 1)
evidence_updates = {matrix: matrix_evidence}
# Ground truth solution
gt_all_ones = np.all(matrix_obs, axis=1).astype(int)
# Solve the primal LP-MAP
cvxpy_lp_vgroups_solution, cvxpy_lp_objval = primal_lp.primal_lp_solver(
fg, evidence_updates
)
cvxpy_map_states = infer.decode_map_states(cvxpy_lp_vgroups_solution)
# Solve the smooth dual LP-MAP
sdlp_arrays = sdlp.init(evidence_updates=evidence_updates)
sdlp_arrays = sdlp.run(
sdlp_arrays,
logsumexp_temp=logsumexp_temp,
lr=None,
num_iters=5_000,
)
sdlp_unaries_decoded, _ = sdlp.decode_primal_unaries(sdlp_arrays)
# First compare the unaries decoded from the dual to the optimal LP solution
assert np.allclose(cvxpy_map_states[all_ones], gt_all_ones)
assert np.allclose(sdlp_unaries_decoded[all_ones], gt_all_ones)
# Second compare the upper and lower bounds of the primal problem (obtained
# from the dual LP-MAP solution) to the optimal objective value
primal_upper_bound = sdlp.get_primal_upper_bound(sdlp_arrays)
primal_lower_bound = sdlp.get_map_lower_bound(
sdlp_arrays, sdlp_unaries_decoded
)
assert np.isclose(cvxpy_lp_objval, primal_upper_bound, rtol=RTOL)
assert np.isclose(primal_lower_bound, primal_upper_bound, rtol=RTOL)
print(primal_lower_bound, primal_upper_bound)
def test_lp_primal_vs_dual_pool_factors():
"""Test the LP-MAP primal and dual solvers on a model with PoolFactors, when the LP relaxation is tight.
The factor graph uses a hierarchy of pool factors of depth n_layers, where
the n_th layer contains 2^n pool variables.
The unique pool variable at level 1 is forced to be ON.
Each pool variable at level n > 1 is
(1) a pool choice in a single pool involving a pool indicator at level
n - 1 and another pool choice at level n
  (2) a pool indicator in a single pool involving 2 pool choices at level n + 1
At each iteration, compare
(1) The decodings returned by both solvers
(2) The optimal objective value returned by the primal solver, with the upper
and lower bounds returned by the dual solver
"""
n_layers = 4
n_pool_choices_by_pool = 2
n_iters = 10
n_pool_choices_by_layers = [
n_pool_choices_by_pool**idx_layer for idx_layer in range(n_layers)
]
cumsum_variables = np.insert(np.cumsum(n_pool_choices_by_layers), 0, 0)
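  # For n_layers = 4 and 2 pool choices per pool, n_pool_choices_by_layers is
  # [1, 2, 4, 8] and cumsum_variables is [0, 1, 3, 7, 15]: the 15 binary
  # variables are indexed layer by layer, starting from the root.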
variables = vgroup.NDVarArray(num_states=2, shape=(cumsum_variables[-1],))
fg = fgraph.FactorGraph(variable_groups=[variables])
variables_for_PoolFactors = []
for idx_pool_layer in range(n_layers - 1):
pool_choices_indices_start = cumsum_variables[idx_pool_layer + 1]
for pool_indicator_idx in range(
cumsum_variables[idx_pool_layer], cumsum_variables[idx_pool_layer + 1]
):
variables_for_PoolFactor = [
variables[pool_choices_indices_start + pool_choice_idx]
for pool_choice_idx in range(n_pool_choices_by_pool)
] + [variables[pool_indicator_idx]]
pool_choices_indices_start += n_pool_choices_by_pool
variables_for_PoolFactors.append(variables_for_PoolFactor)
factor_group = fgroup.PoolFactorGroup(variables_for_PoolFactors)
fg.add_factors(factor_group)
# Create the dual LP-MAP solver
sdlp = infer.build_inferer(fg.bp_state, backend="sdlp")
for it in range(n_iters):
np.random.seed(it)
logsumexp_temp = 1e-3
lr = None
# Evidence updates
updates = np.random.gumbel(size=(variables.shape[0], 2))
updates[0, 1] = 1_000
evidence_updates = {variables: updates}
# Solve the primal LP-MAP
cvxpy_lp_vgroups_solution, cvxpy_lp_objval = primal_lp.primal_lp_solver(
fg, evidence_updates
)
cvxpy_map_states = infer.decode_map_states(cvxpy_lp_vgroups_solution)
# Solve the smooth dual LP-MAP
sdlp_arrays = sdlp.init(evidence_updates=evidence_updates)
sdlp_arrays = sdlp.run(
sdlp_arrays,
logsumexp_temp=logsumexp_temp,
lr=lr,
num_iters=5_000,
)
sdlp_unaries_decoded, _ = sdlp.decode_primal_unaries(sdlp_arrays)
# First compare the unaries decoded from the dual to the optimal LP solution
assert np.allclose(
sdlp_unaries_decoded[variables], cvxpy_map_states[variables]
)
assert sdlp_unaries_decoded[variables].sum() == n_layers
# Second compare the upper and lower bounds of the primal problem (obtained
# from the dual LP-MAP solution) to the optimal objective value
primal_upper_bound = sdlp.get_primal_upper_bound(sdlp_arrays)
primal_lower_bound = sdlp.get_map_lower_bound(
sdlp_arrays, sdlp_unaries_decoded
)
assert np.isclose(cvxpy_lp_objval, primal_upper_bound, rtol=RTOL)
assert np.isclose(primal_lower_bound, primal_upper_bound, rtol=RTOL)
# Also test that both standard and debug modes return the same lower bound
primal_lower_bound_debug = sdlp.get_map_lower_bound(
sdlp_arrays, sdlp_unaries_decoded, debug_mode=True
)
assert np.isclose(primal_lower_bound, primal_lower_bound_debug, rtol=RTOL)
| PGMax-main | tests/lp/test_dual_lp.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the convergence of the BP updates at very low temperature to the max-product updates, as well as their consistency across edges."""
import numpy as np
from pgmax import factor
from pgmax import fgraph
from pgmax import fgroup
from pgmax import infer
from pgmax import vgroup
ATOL = 1e-5
def test_convergence_consistency_enum_factors():
"""Test the convergence and consistency of the BP updates for EnumFactors, for an Ising model with categorical variables.
Simultaneously test that
(1) the BP updates at very low temperature are close to the max-product
updates for T=0
  (2) for each factor, the logsumexp of the outgoing messages over all
its configs is constant, when evaluated at each edge connected to this factor
"""
num_states = 3
grid_size = 4
n_iters = 10
# Create an Ising model
variables = vgroup.NDVarArray(
num_states=num_states, shape=(grid_size, grid_size)
)
fg = fgraph.FactorGraph(variable_groups=variables)
variables_for_factors = []
for ii in range(grid_size):
for jj in range(grid_size):
for kk in range(ii + 1, grid_size):
for ll in range(jj + 1, grid_size):
variables_for_factors.append([variables[ii, jj], variables[kk, ll]])
factor_group = fgroup.PairwiseFactorGroup(
variables_for_factors=variables_for_factors,
log_potential_matrix=np.random.normal(
size=(len(variables_for_factors), num_states, num_states)
),
)
fg.add_factors(factor_group)
for idx in range(n_iters):
np.random.seed(idx)
# Evidence updates
evidence_updates = {
variables: np.random.gumbel(size=(grid_size, grid_size, num_states))
}
    # Test for different very low temperatures
if idx % 2 == 0:
temperature = 1e-3
else:
temperature = 0.01
# Create the dual solver
sdlp = infer.build_inferer(fg.bp_state, backend="sdlp")
# Messages are randomly initialized
ftov_msgs_updates = {
factor.EnumFactor: np.random.normal(
size=fg.bp_state.ftov_msgs.value.shape
)
}
sdlp_arrays = sdlp.init(
evidence_updates=evidence_updates, ftov_msgs_updates=ftov_msgs_updates
)
# Max-product updates
(
bp_updates_t0,
maxes_fac_configs_t0,
) = sdlp.get_bp_updates(sdlp_arrays=sdlp_arrays, logsumexp_temp=0.0)
# BP with low temperature updates
(
bp_updates_t_small,
logsumexp_fac_configs_t_small,
) = sdlp.get_bp_updates(sdlp_arrays=sdlp_arrays, logsumexp_temp=temperature)
# Check that the updates are close
assert np.allclose(bp_updates_t0, bp_updates_t_small, atol=temperature)
# Map factor indices to edge indices
inferer_context = infer.inferer.InfererContext(fg.bp_state)
factor_to_edges = {}
for edge_idx, factor_idx in zip(
inferer_context.edge_indices_for_edge_states,
inferer_context.factor_indices_for_edge_states,
):
if factor_idx not in factor_to_edges:
factor_to_edges[factor_idx] = [edge_idx]
else:
if edge_idx not in factor_to_edges[factor_idx]:
factor_to_edges[factor_idx].append(edge_idx)
    # Check that the maxes and logsumexp of the outgoing messages over all the
    # factor configs are the same, when evaluated at each edge connected to
    # the same factor
for edge_indices in factor_to_edges.values():
edge_indices = np.array(edge_indices)
# Check that maxes_fac_configs are the same at each edge of a factor
maxes_fac_configs_min = maxes_fac_configs_t0[edge_indices].min()
maxes_fac_configs_max = maxes_fac_configs_t0[edge_indices].max()
assert np.allclose(
maxes_fac_configs_min, maxes_fac_configs_max, atol=ATOL
)
# Check that logsumexp_fac_configs are the same at each edge of a factor
logsumexp_fac_configs_min = logsumexp_fac_configs_t_small[
edge_indices
].min()
logsumexp_fac_configs_max = logsumexp_fac_configs_t_small[
edge_indices
].max()
assert np.allclose(
logsumexp_fac_configs_min, logsumexp_fac_configs_max, atol=ATOL
)
def test_convergence_consistency_or_factors():
"""Test the convergence and consistency of the BP updates for ORFactors.
The FactorGraph uses ORFactors to sparsify a line: the line is represented
by the bottom variables, while the sparse representation is represented by
the top variables. Each bottom variable is active and can be explained
  by any one of its 3 closest top variables.
Simultaneously test that
(1) the BP updates at very low temperature are close to the max-product
updates for T=0
  (2) for each factor, the logsumexp of the outgoing messages over all
its configs is constant, when evaluated at each edge connected to this factor
"""
line_length = 20
n_iters = 10
# Create the FactorGraph
top_variables = vgroup.NDVarArray(num_states=2, shape=(line_length,))
bottom_variables = vgroup.NDVarArray(num_states=2, shape=(line_length,))
fg = fgraph.FactorGraph(variable_groups=[top_variables, bottom_variables])
# Add ORFactors to the graph
variables_for_or_factors = []
for factor_idx in range(line_length):
variables_for_or_factor = [top_variables[factor_idx]]
if factor_idx >= 1:
variables_for_or_factor.append(top_variables[factor_idx - 1])
if factor_idx <= line_length - 2:
variables_for_or_factor.append(top_variables[factor_idx + 1])
# Add child variable at the last position
variables_for_or_factor.append(bottom_variables[factor_idx])
variables_for_or_factors.append(variables_for_or_factor)
factor_group = fgroup.ORFactorGroup(variables_for_or_factors)
fg.add_factors(factor_group)
# Evidence update: bottom variables are all turned ON
bottom_variables_evidence = np.zeros((line_length, 2))
bottom_variables_evidence[..., 0] = -10_000
# Top variables are more likely to be turned OFF
top_variables_evidence = np.zeros((line_length, 2))
top_variables_evidence[..., 1] = -100
# Create the dual solver
sdlp = infer.build_inferer(fg.bp_state, backend="sdlp")
for idx in range(n_iters):
np.random.seed(idx)
    # Test for different very low temperatures
if idx % 2 == 0:
temperature = 1e-3
else:
temperature = 0.01
# Add Gumbel noise to the evidence updates
top_variables_add = np.random.gumbel(size=top_variables_evidence.shape)
evidence_updates = {
top_variables: top_variables_evidence + top_variables_add,
bottom_variables: bottom_variables_evidence,
}
# Create the dual solver
sdlp = infer.build_inferer(fg.bp_state, backend="sdlp")
# Messages are randomly initialized
ftov_msgs_updates = {
factor.ORFactor: np.random.normal(
size=fg.bp_state.ftov_msgs.value.shape
)
}
sdlp_arrays = sdlp.init(
evidence_updates=evidence_updates, ftov_msgs_updates=ftov_msgs_updates
)
# Max-product updates
(
bp_updates_t0,
maxes_fac_configs_t0,
) = sdlp.get_bp_updates(sdlp_arrays=sdlp_arrays, logsumexp_temp=0.0)
# BP with low temperature updates
(
bp_updates_t_small,
logsumexp_fac_configs_t_small,
) = sdlp.get_bp_updates(sdlp_arrays=sdlp_arrays, logsumexp_temp=temperature)
# Check that the updates are close
assert np.allclose(bp_updates_t0, bp_updates_t_small, atol=temperature)
# Map factor indices to edge indices
inferer_context = infer.inferer.InfererContext(fg.bp_state)
factor_to_edges = {}
for edge_idx, factor_idx in zip(
inferer_context.edge_indices_for_edge_states,
inferer_context.factor_indices_for_edge_states,
):
if factor_idx not in factor_to_edges:
factor_to_edges[factor_idx] = [edge_idx]
else:
if edge_idx not in factor_to_edges[factor_idx]:
factor_to_edges[factor_idx].append(edge_idx)
    # Check that the maxes and logsumexp of the outgoing messages over all the
    # factor configs are the same, when evaluated at each edge connected to
    # the same factor
for edge_indices in factor_to_edges.values():
edge_indices = np.array(edge_indices)
# Check that maxes_fac_configs are the same at each edge of a factor
maxes_fac_configs_min = maxes_fac_configs_t0[edge_indices].min()
maxes_fac_configs_max = maxes_fac_configs_t0[edge_indices].max()
assert np.allclose(
maxes_fac_configs_min, maxes_fac_configs_max, atol=ATOL
)
# Check that logsumexp_fac_configs are the same at each edge of a factor
logsumexp_fac_configs_min = logsumexp_fac_configs_t_small[
edge_indices
].min()
logsumexp_fac_configs_max = logsumexp_fac_configs_t_small[
edge_indices
].max()
assert np.allclose(
logsumexp_fac_configs_min, logsumexp_fac_configs_max, atol=ATOL
)
def test_convergence_consistency_pool_factors():
"""Test the convergence and consistency of the BP updates for PoolFactors.
The factor graph uses a hierarchy of pool factors of depth n_layers, where
  the n_th layer contains 2^n pool variables.
The unique pool variable at level 1 is forced to be ON.
Each pool variable at level n > 1 is
(1) a pool choice in one pool also involving a pool indicator at level n - 1
and another pool choice at level n
  (2) a pool indicator in one pool also involving 2 pool choices at level n + 1
Simultaneously test that
(1) the BP updates at very low temperature are close to the max-product
updates for T=0
  (2) for each factor, the logsumexp of the outgoing messages over all
its configs is constant, when evaluated at each edge connected to this factor
"""
n_layers = 4
n_pool_choices_by_pool = 2
n_iters = 10
n_pool_choices_by_layers = [
n_pool_choices_by_pool**idx_layer for idx_layer in range(n_layers)
]
cumsum_variables = np.insert(np.cumsum(n_pool_choices_by_layers), 0, 0)
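  # As in the LP-MAP test: with n_layers = 4 the layers contain [1, 2, 4, 8]
  # pool variables and cumsum_variables is [0, 1, 3, 7, 15], indexing the 15
  # binary variables layer by layer from the root.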
variables = vgroup.NDVarArray(num_states=2, shape=(cumsum_variables[-1],))
fg = fgraph.FactorGraph(variable_groups=[variables])
variables_for_pool_factors = []
for idx_pool_layer in range(n_layers - 1):
pool_choices_indices_start = cumsum_variables[idx_pool_layer + 1]
for pool_indicator_idx in range(
cumsum_variables[idx_pool_layer], cumsum_variables[idx_pool_layer + 1]
):
variables_for_pool_factor = [
variables[pool_choices_indices_start + pool_choice_idx]
for pool_choice_idx in range(n_pool_choices_by_pool)
] + [variables[pool_indicator_idx]]
pool_choices_indices_start += n_pool_choices_by_pool
variables_for_pool_factors.append(variables_for_pool_factor)
factor_group = fgroup.PoolFactorGroup(variables_for_pool_factors)
fg.add_factors(factor_group)
for idx in range(n_iters):
np.random.seed(idx)
if idx % 2 == 0:
temperature = 1e-3
else:
temperature = 0.01
# Evidence update
updates = np.random.gumbel(size=(variables.shape[0], 2))
updates[0, 1] = 10
evidence_updates = {variables: updates}
# Create the dual solver
sdlp = infer.build_inferer(fg.bp_state, backend="sdlp")
# Messages are randomly initialized
ftov_msgs_updates = {
factor.PoolFactor: np.random.normal(
size=fg.bp_state.ftov_msgs.value.shape
)
}
sdlp_arrays = sdlp.init(
evidence_updates=evidence_updates, ftov_msgs_updates=ftov_msgs_updates
)
# Max-product updates
(
bp_updates_t0,
maxes_fac_configs_t0,
) = sdlp.get_bp_updates(sdlp_arrays=sdlp_arrays, logsumexp_temp=0.0)
# BP with low temperature updates
(
bp_updates_t_small,
logsumexp_fac_configs_t_small,
) = sdlp.get_bp_updates(sdlp_arrays=sdlp_arrays, logsumexp_temp=temperature)
# Check that the updates are close
assert np.allclose(bp_updates_t0, bp_updates_t_small, atol=temperature)
# Map factor indices to edge indices
inferer_context = infer.inferer.InfererContext(fg.bp_state)
factor_to_edges = {}
for edge_idx, factor_idx in zip(
inferer_context.edge_indices_for_edge_states,
inferer_context.factor_indices_for_edge_states,
):
if factor_idx not in factor_to_edges:
factor_to_edges[factor_idx] = [edge_idx]
else:
if edge_idx not in factor_to_edges[factor_idx]:
factor_to_edges[factor_idx].append(edge_idx)
    # Check that the maxes and logsumexp of the outgoing messages over all the
    # factor configs are the same, when evaluated at each edge connected to
    # the same factor
for edge_indices in factor_to_edges.values():
edge_indices = np.array(edge_indices)
# Check that maxes_fac_configs are the same at each edge of a factor
maxes_fac_configs_min = maxes_fac_configs_t0[edge_indices].min()
maxes_fac_configs_max = maxes_fac_configs_t0[edge_indices].max()
assert np.allclose(
maxes_fac_configs_min, maxes_fac_configs_max, atol=ATOL
)
# Check that logsumexp_fac_configs are the same at each edge of a factor
logsumexp_fac_configs_min = logsumexp_fac_configs_t_small[
edge_indices
].min()
logsumexp_fac_configs_max = logsumexp_fac_configs_t_small[
edge_indices
].max()
assert np.allclose(
logsumexp_fac_configs_min, logsumexp_fac_configs_max, atol=ATOL
)
| PGMax-main | tests/lp/test_bp_for_lp.py |
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the correct implementation of the different factors."""
import re
import numpy as np
from pgmax import factor
from pgmax import vgroup
import pytest
def test_enumeration_factor():
"""Test the correct implementation of enumeration factors."""
variables = vgroup.NDVarArray(num_states=3, shape=(1,))
with pytest.raises(
NotImplementedError,
match="Please implement compile_wiring in for your factor",
):
factor.Factor(
variables=[variables[0]],
log_potentials=np.array([0.0]),
)
with pytest.raises(
ValueError, match="Configurations should be integers. Got"
):
factor.EnumFactor(
variables=[variables[0]],
factor_configs=np.array([[1.0]]),
log_potentials=np.array([0.0]),
)
with pytest.raises(ValueError, match="Potential should be floats. Got"):
factor.EnumFactor(
variables=[variables[0]],
factor_configs=np.array([[1]]),
log_potentials=np.array([0]),
)
with pytest.raises(ValueError, match="factor_configs should be a 2D array"):
factor.EnumFactor(
variables=[variables[0]],
factor_configs=np.array([1]),
log_potentials=np.array([0.0]),
)
with pytest.raises(
ValueError,
match=re.escape(
"Number of variables 1 doesn't match given configurations (1, 2)"
),
):
factor.EnumFactor(
variables=[variables[0]],
factor_configs=np.array([[1, 2]]),
log_potentials=np.array([0.0]),
)
with pytest.raises(
ValueError, match=re.escape("Expected log potentials of shape (1,)")
):
factor.EnumFactor(
variables=[variables[0]],
factor_configs=np.array([[1]]),
log_potentials=np.array([0.0, 1.0]),
)
with pytest.raises(
ValueError, match="Invalid configurations for given variables"
):
factor.EnumFactor(
variables=[variables[0]],
factor_configs=np.array([[10]]),
log_potentials=np.array([0.0]),
)
with pytest.raises(
ValueError, match="list_var_states_for_edges cannot be None"
):
factor.concatenate_var_states_for_edges(None)
with pytest.raises(
ValueError, match="var_states_for_edges cannot be None"
):
factor.concatenate_var_states_for_edges([None])
def test_logical_factor():
"""Test the correct implementation of the logical factors."""
child = vgroup.NDVarArray(num_states=2, shape=(1,))[0]
wrong_parent = vgroup.NDVarArray(num_states=3, shape=(1,))[0]
parent = vgroup.NDVarArray(num_states=2, shape=(1,))[0]
with pytest.raises(
ValueError,
match=(
"A LogicalFactor requires at least one parent variable and one child"
" variable"
),
):
factor.logical.LogicalFactor(variables=(child,))
with pytest.raises(
ValueError, match="All the variables in a LogicalFactor should be binary"
):
factor.logical.LogicalFactor(variables=(wrong_parent, child))
logical_factor = factor.logical.LogicalFactor(variables=(parent, child))
num_parents = len(logical_factor.variables) - 1
parents_edge_states = np.vstack(
[
np.zeros(num_parents, dtype=int),
np.arange(0, 2 * num_parents, 2, dtype=int),
],
).T
child_edge_state = np.array([2 * num_parents], dtype=int)
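  # With the single parent used here: parents_edge_states == [[0, 0]]
  # (LogicalFactor index 0, edge-state index 0) and child_edge_state == [2],
  # since edge states are enumerated two per binary variable, parents first.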
with pytest.raises(
ValueError, match="The highest LogicalFactor index must be 0"
):
factor.logical.LogicalWiring(
var_states_for_edges=None,
parents_edge_states=parents_edge_states + np.array([[1, 0]]),
children_edge_states=child_edge_state,
edge_states_offset=1,
)
with pytest.raises(
ValueError,
match="The LogicalWiring must have 1 different LogicalFactor indices",
):
factor.logical.LogicalWiring(
var_states_for_edges=None,
parents_edge_states=parents_edge_states + np.array([[0], [1]]),
children_edge_states=child_edge_state,
edge_states_offset=1,
)
with pytest.raises(
ValueError,
match=re.escape(
"The LogicalWiring's edge_states_offset must be 1 (for OR) and -1"
" (for AND), but is 0"
),
):
factor.logical.LogicalWiring(
var_states_for_edges=None,
parents_edge_states=parents_edge_states,
children_edge_states=child_edge_state,
edge_states_offset=0,
)
def test_pool_factor():
"""Test the correct implementation of the pool factors."""
pool_choice = vgroup.NDVarArray(num_states=2, shape=(1,))[0]
wrong_pool_indicator = vgroup.NDVarArray(num_states=3, shape=(1,))[0]
pool_indicator = vgroup.NDVarArray(num_states=2, shape=(1,))[0]
with pytest.raises(
ValueError,
match=(
"A PoolFactor requires at least one pool choice and one pool "
"indicator."
),
):
factor.pool.PoolFactor(variables=(pool_choice,))
with pytest.raises(
ValueError, match="All the variables in a PoolFactor should all be binary"
):
factor.pool.PoolFactor(variables=(wrong_pool_indicator, pool_choice))
pool_factor = factor.pool.PoolFactor(variables=(pool_indicator, pool_choice))
num_children = len(pool_factor.variables) - 1
pool_choices_edge_states = np.vstack(
[
np.zeros(num_children, dtype=int),
np.arange(0, 2 * num_children, 2, dtype=int),
],
).T
pool_indicators_edge_state = np.array([2 * num_children], dtype=int)
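  # With the single pool choice used here: pool_choices_edge_states == [[0, 0]]
  # and pool_indicators_edge_state == [2], mirroring the LogicalFactor wiring
  # above.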
with pytest.raises(
ValueError, match="The highest PoolFactor index must be 0"
):
factor.pool.PoolWiring(
var_states_for_edges=None,
pool_choices_edge_states=pool_choices_edge_states + np.array([[1, 0]]),
pool_indicators_edge_states=pool_indicators_edge_state,
)
with pytest.raises(
ValueError,
match="The PoolWiring must have 1 different PoolFactor indices",
):
factor.pool.PoolWiring(
var_states_for_edges=None,
pool_indicators_edge_states=pool_indicators_edge_state,
pool_choices_edge_states=pool_choices_edge_states
+ np.array([[0], [1]]),
)
| PGMax-main | tests/factor/test_factor.py |
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test building factor graphs and running inference with OR factors."""
import itertools
import jax
import numpy as np
from pgmax import factor
from pgmax import fgraph
from pgmax import fgroup
from pgmax import infer
from pgmax import vgroup
# pylint: disable=invalid-name
def test_run_bp_with_ORFactors():
"""Test building factor graphs and running inference with OR factors.
Simultaneously test
(1) the support of ORFactors in a FactorGraph and their specialized
inference for different temperatures
(2) the support of several factor types in a FactorGraph and during
inference
(3) the computation of the energy in standard and debug mode
To do so, observe that an ORFactor can be defined as an equivalent
  EnumFactor (which lists all the valid OR configurations) and define two
equivalent FactorGraphs:
FG1: first half of factors are defined as EnumFactors, second half are
defined as ORFactors
FG2: first half of factors are defined as ORFactors, second half are defined
as EnumFactors
Inference for the EnumFactors is run with pass_enum_fac_to_var_messages
while inference for the ORFactors is run with pass_logical_fac_to_var_messages
Note: for the first seed, add all the EnumFactors to FG1 and all the
ORFactors to FG2
"""
for idx in range(16):
np.random.seed(idx)
# Parameters
num_factors = np.random.randint(10, 20)
num_parents = np.random.randint(1, 10, num_factors)
num_parents_cumsum = np.insert(np.cumsum(num_parents), 0, 0)
# Setting the temperature
    # The efficient message updates for OR/AND factors with linear complexity
    # come at the cost of a decrease in computational stability:
    # (1) larger factors have larger but rarer errors
    # (2) temperatures around 0.05 have higher errors
if idx % 4 == 0:
# Max-product
temperature = 0.0
atol = 1e-5
elif idx % 4 == 1:
      # Low temperatures are a hard test for stable updates
temperature = 0.001
      # The efficient message updates for OR/AND factors with linear complexity
      # come at the cost of a decrease in stability for large factors
      # and low temperatures
atol = 5e-3
elif idx % 4 == 2:
temperature = np.random.uniform(
low=0.1, high=factor.logical.TEMPERATURE_STABILITY_THRE
)
atol = 5e-3
else:
temperature = np.random.uniform(
low=factor.logical.TEMPERATURE_STABILITY_THRE, high=1.0
)
atol = 1e-5
    # We create different variables for the 2 FactorGraphs even though
    # we could use the same variables for both graphs
# Graph 1
parents_variables1 = vgroup.NDVarArray(
num_states=2, shape=(num_parents.sum(),)
)
children_variables1 = vgroup.NDVarArray(num_states=2, shape=(num_factors,))
fg1 = fgraph.FactorGraph(
variable_groups=[parents_variables1, children_variables1]
)
# Graph 2
parents_variables2 = vgroup.NDVarArray(
num_states=2, shape=(num_parents.sum(),)
)
children_variables2 = vgroup.NDVarArray(num_states=2, shape=(num_factors,))
fg2 = fgraph.FactorGraph(
variable_groups=[parents_variables2, children_variables2]
)
# Variable names for factors
variables_for_factors1 = []
variables_for_factors2 = []
for factor_idx in range(num_factors):
variables1 = []
for idx1 in range(
num_parents_cumsum[factor_idx], num_parents_cumsum[factor_idx + 1]
):
variables1.append(parents_variables1[idx1])
variables1 += [children_variables1[factor_idx]]
variables_for_factors1.append(variables1)
variables2 = []
for idx2 in range(
num_parents_cumsum[factor_idx], num_parents_cumsum[factor_idx + 1]
):
variables2.append(parents_variables2[idx2])
variables2 += [children_variables2[factor_idx]]
variables_for_factors2.append(variables2)
# Option 1: Define EnumFactors equivalent to the ORFactors
for factor_idx in range(num_factors):
this_num_parents = num_parents[factor_idx]
configs = np.array(
list(itertools.product([0, 1], repeat=this_num_parents + 1))
)
# Children state is last
valid_ON_configs = configs[
np.logical_and(configs[:, :-1].sum(axis=1) >= 1, configs[:, -1] == 1)
]
valid_configs = np.concatenate(
[np.zeros((1, this_num_parents + 1), dtype=int), valid_ON_configs],
axis=0,
)
assert valid_configs.shape[0] == 2**this_num_parents
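      # For instance, with 2 parents the 4 valid configurations
      # (parent_1, parent_2, child) are:
      # [0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]
      # i.e. the child is the OR of its parents, hence 2**num_parents configs.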
if factor_idx < num_factors // 2:
# Add the first half of factors to FactorGraph1
enum_factor = factor.EnumFactor(
variables=variables_for_factors1[factor_idx],
factor_configs=valid_configs,
log_potentials=np.zeros(valid_configs.shape[0]),
)
fg1.add_factors(enum_factor)
else:
if idx != 0:
# Add the second half of factors to FactorGraph2
enum_factor = factor.EnumFactor(
variables=variables_for_factors2[factor_idx],
factor_configs=valid_configs,
log_potentials=np.zeros(valid_configs.shape[0]),
)
fg2.add_factors(enum_factor)
else:
# Add all the EnumFactors to FactorGraph1 for the first iter
enum_factor = factor.EnumFactor(
variables=variables_for_factors1[factor_idx],
factor_configs=valid_configs,
log_potentials=np.zeros(valid_configs.shape[0]),
)
fg1.add_factors(enum_factor)
# Option 2: Define the ORFactors
variables_for_ORFactors_fg1 = []
variables_for_ORFactors_fg2 = []
for factor_idx in range(num_factors):
if factor_idx < num_factors // 2:
# Add the first half of factors to FactorGraph2
variables_for_ORFactors_fg2.append(variables_for_factors2[factor_idx])
else:
if idx != 0:
# Add the second half of factors to FactorGraph1
variables_for_ORFactors_fg1.append(variables_for_factors1[factor_idx])
else:
# Add all the ORFactors to FactorGraph2 for the first iter
variables_for_ORFactors_fg2.append(variables_for_factors2[factor_idx])
if idx != 0:
factor_group = fgroup.ORFactorGroup(variables_for_ORFactors_fg1)
fg1.add_factors(factor_group)
factor_group = fgroup.ORFactorGroup(variables_for_ORFactors_fg2)
fg2.add_factors(factor_group)
# Set up inference
bp1 = infer.build_inferer(fg1.bp_state, backend="bp")
bp2 = infer.build_inferer(fg2.bp_state, backend="bp")
# Randomly initialize the evidence
evidence_parents = jax.device_put(
np.random.gumbel(size=(sum(num_parents), 2))
)
evidence_children = jax.device_put(np.random.gumbel(size=(num_factors, 2)))
evidence_updates1 = {
parents_variables1: evidence_parents,
children_variables1: evidence_children,
}
evidence_updates2 = {
parents_variables2: evidence_parents,
children_variables2: evidence_children,
}
# Randomly initialize the messages
ftov_msgs_updates1 = {}
ftov_msgs_updates2 = {}
for idx in range(num_factors):
ftov = np.random.normal(size=(2,))
ftov_msgs_updates1[children_variables1[idx]] = ftov
ftov_msgs_updates2[children_variables2[idx]] = ftov
for idx in range(num_parents_cumsum[-1]):
ftov = np.random.normal(size=(2,))
ftov_msgs_updates1[parents_variables1[idx]] = ftov
ftov_msgs_updates2[parents_variables2[idx]] = ftov
# Run BP
bp_arrays1 = bp1.init(
evidence_updates=evidence_updates1, ftov_msgs_updates=ftov_msgs_updates1
)
bp_arrays1 = bp1.run(bp_arrays1, num_iters=5, temperature=temperature)
bp_arrays2 = bp2.init(
evidence_updates=evidence_updates2, ftov_msgs_updates=ftov_msgs_updates2
)
bp_arrays2 = bp2.run(bp_arrays2, num_iters=5, temperature=temperature)
# Get beliefs
beliefs1 = bp1.get_beliefs(bp_arrays1)
beliefs2 = bp2.get_beliefs(bp_arrays2)
assert np.allclose(
beliefs1[children_variables1], beliefs2[children_variables2], atol=atol
)
assert np.allclose(
beliefs1[parents_variables1], beliefs2[parents_variables2], atol=atol
)
# Get the map states and compare their energies
map_states1 = infer.decode_map_states(beliefs1)
map_states2 = infer.decode_map_states(beliefs2)
energy_decoding1 = infer.compute_energy(
fg1.bp_state, bp_arrays1, map_states1
)[0]
energy_decoding2 = infer.compute_energy(
fg2.bp_state, bp_arrays2, map_states2
)[0]
energy_decoding1_debug, var_energies1, factor_energies1 = (
infer.compute_energy(
fg1.bp_state, bp_arrays1, map_states1, debug_mode=True
)
)
energy_decoding2_debug, var_energies2, factor_energies2 = (
infer.compute_energy(
fg2.bp_state, bp_arrays2, map_states2, debug_mode=True
)
)
assert np.allclose(energy_decoding1, energy_decoding2, atol=atol)
assert np.allclose(energy_decoding1, energy_decoding1_debug, atol=atol)
assert np.allclose(energy_decoding2, energy_decoding2_debug, atol=atol)
# Also compare the energy of all the individual variables and factors
for child_idx in range(num_factors):
var_energy1 = var_energies1[children_variables1[child_idx]]
var_energy2 = var_energies2[children_variables2[child_idx]]
assert np.allclose(var_energy1, var_energy2, atol=atol)
for parent_idx in range(num_parents_cumsum[-1]):
var_energy1 = var_energies1[parents_variables1[parent_idx]]
var_energy2 = var_energies2[parents_variables2[parent_idx]]
assert np.allclose(var_energy1, var_energy2, atol=atol)
for factor_idx in range(num_factors):
factor_energy1 = factor_energies1[
frozenset(variables_for_factors1[factor_idx])
]
factor_energy2 = factor_energies2[
frozenset(variables_for_factors2[factor_idx])
]
assert np.allclose(factor_energy1, factor_energy2, atol=atol)
| PGMax-main | tests/factor/test_or.py |
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test building factor graphs and running inference with AND factors."""
import itertools
import jax
import numpy as np
from pgmax import factor
from pgmax import fgraph
from pgmax import fgroup
from pgmax import infer
from pgmax import vgroup
# pylint: disable=invalid-name
def test_run_bp_with_ANDFactors():
"""Test building factor graphs and running inference with AND factors.
Simultaneously test
(1) the support of ANDFactors in a FactorGraph and their specialized
inference for different temperatures
(2) the support of several factor types in a FactorGraph and during
inference
(3) the computation of the energy in standard and debug mode
To do so, observe that an ANDFactor can be defined as an equivalent
  EnumFactor (which lists all the valid AND configurations)
and define two equivalent FactorGraphs:
FG1: first half of factors are defined as EnumFactors, second half are
defined as ANDFactors
FG2: first half of factors are defined as ANDFactors, second half are
defined as EnumFactors
Inference for the EnumFactors is run with pass_enum_fac_to_var_messages
while inference for the ANDFactors is run with
pass_logical_fac_to_var_messages
Note: for the first seed, add all the EnumFactors to FG1 and all the
ANDFactors to FG2
"""
for idx in range(16):
np.random.seed(idx)
# Parameters
num_factors = np.random.randint(10, 20)
num_parents = np.random.randint(1, 10, num_factors)
num_parents_cumsum = np.insert(np.cumsum(num_parents), 0, 0)
# Setting the temperature
    # The efficient message updates for OR/AND factors with linear complexity
    # come at the cost of a decrease in computational stability:
    # (1) larger factors have larger but rarer errors
    # (2) temperatures around 0.05 have higher errors
if idx % 4 == 0:
# Max-product
temperature = 0.0
atol = 1e-5
elif idx % 4 == 1:
      # Low temperatures are a hard test for stable updates
temperature = 0.001
      # The efficient message updates for OR/AND factors with linear complexity
      # come at the cost of a decrease in stability for large factors
      # and low temperatures
atol = 5e-3
elif idx % 4 == 2:
temperature = np.random.uniform(
low=0.1, high=factor.logical.TEMPERATURE_STABILITY_THRE
)
atol = 5e-3
else:
temperature = np.random.uniform(
low=factor.logical.TEMPERATURE_STABILITY_THRE, high=1.0
)
atol = 1e-5
    # We create different variables for the 2 FactorGraphs even though
    # we could use the same variables for both graphs
# Graph 1
parents_variables1 = vgroup.NDVarArray(
num_states=2, shape=(num_parents.sum(),)
)
children_variables1 = vgroup.NDVarArray(num_states=2, shape=(num_factors,))
fg1 = fgraph.FactorGraph(
variable_groups=[parents_variables1, children_variables1]
)
# Graph 2
parents_variables2 = vgroup.NDVarArray(
num_states=2, shape=(num_parents.sum(),)
)
children_variables2 = vgroup.NDVarArray(num_states=2, shape=(num_factors,))
fg2 = fgraph.FactorGraph(
variable_groups=[parents_variables2, children_variables2]
)
    # Variable names for factors
variables_for_factors1 = []
variables_for_factors2 = []
for factor_idx in range(num_factors):
variables1 = []
for idx1 in range(
num_parents_cumsum[factor_idx], num_parents_cumsum[factor_idx + 1]
):
variables1.append(parents_variables1[idx1])
variables1 += [children_variables1[factor_idx]]
variables_for_factors1.append(variables1)
variables2 = []
for idx2 in range(
num_parents_cumsum[factor_idx], num_parents_cumsum[factor_idx + 1]
):
variables2.append(parents_variables2[idx2])
variables2 += [children_variables2[factor_idx]]
variables_for_factors2.append(variables2)
# Option 1: Define EnumFactors equivalent to the ANDFactors
for factor_idx in range(num_factors):
this_num_parents = num_parents[factor_idx]
configs = np.array(
list(itertools.product([0, 1], repeat=this_num_parents + 1))
)
# Children state is last
valid_AND_configs = configs[
np.logical_and(
configs[:, :-1].sum(axis=1) < this_num_parents,
configs[:, -1] == 0,
)
]
valid_configs = np.concatenate(
[np.ones((1, this_num_parents + 1), dtype=int), valid_AND_configs],
axis=0,
)
assert valid_configs.shape[0] == 2**this_num_parents
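      # For instance, with 2 parents the 4 valid configurations
      # (parent_1, parent_2, child) are:
      # [1, 1, 1], [0, 0, 0], [0, 1, 0], [1, 0, 0]
      # i.e. the child is the AND of its parents, hence 2**num_parents configs.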
if factor_idx < num_factors // 2:
# Add the first half of factors to FactorGraph1
enum_factor = factor.EnumFactor(
variables=variables_for_factors1[factor_idx],
factor_configs=valid_configs,
log_potentials=np.zeros(valid_configs.shape[0]),
)
fg1.add_factors(enum_factor)
else:
if idx != 0:
# Add the second half of factors to FactorGraph2
enum_factor = factor.EnumFactor(
variables=variables_for_factors2[factor_idx],
factor_configs=valid_configs,
log_potentials=np.zeros(valid_configs.shape[0]),
)
fg2.add_factors(enum_factor)
else:
# Add all the EnumFactors to FactorGraph1 for the first iter
enum_factor = factor.EnumFactor(
variables=variables_for_factors1[factor_idx],
factor_configs=valid_configs,
log_potentials=np.zeros(valid_configs.shape[0]),
)
fg1.add_factors(enum_factor)
# Option 2: Define the ANDFactors
variables_for_ANDFactors_fg1 = []
variables_for_ANDFactors_fg2 = []
for factor_idx in range(num_factors):
if factor_idx < num_factors // 2:
# Add the first half of factors to FactorGraph2
variables_for_ANDFactors_fg2.append(variables_for_factors2[factor_idx])
else:
if idx != 0:
# Add the second half of factors to FactorGraph1
variables_for_ANDFactors_fg1.append(
variables_for_factors1[factor_idx]
)
else:
# Add all the ANDFactors to FactorGraph2 for the first iter
variables_for_ANDFactors_fg2.append(
variables_for_factors2[factor_idx]
)
if idx != 0:
factor_group = fgroup.ANDFactorGroup(variables_for_ANDFactors_fg1)
fg1.add_factors(factor_group)
factor_group = fgroup.ANDFactorGroup(variables_for_ANDFactors_fg2)
fg2.add_factors(factor_group)
# Set up inference
bp1 = infer.build_inferer(fg1.bp_state, backend="bp")
bp2 = infer.build_inferer(fg2.bp_state, backend="bp")
# Randomly initialize the evidence
evidence_parents = jax.device_put(
np.random.gumbel(size=(sum(num_parents), 2))
)
evidence_children = jax.device_put(np.random.gumbel(size=(num_factors, 2)))
evidence_updates1 = {
parents_variables1: evidence_parents,
children_variables1: evidence_children,
}
evidence_updates2 = {
parents_variables2: evidence_parents,
children_variables2: evidence_children,
}
# Randomly initialize the messages
ftov_msgs_updates1 = {}
ftov_msgs_updates2 = {}
for idx in range(num_factors):
ftov = np.random.normal(size=(2,))
ftov_msgs_updates1[children_variables1[idx]] = ftov
ftov_msgs_updates2[children_variables2[idx]] = ftov
for idx in range(num_parents_cumsum[-1]):
ftov = np.random.normal(size=(2,))
ftov_msgs_updates1[parents_variables1[idx]] = ftov
ftov_msgs_updates2[parents_variables2[idx]] = ftov
# Run BP
bp_arrays1 = bp1.init(
evidence_updates=evidence_updates1, ftov_msgs_updates=ftov_msgs_updates1
)
bp_arrays1 = bp1.run(bp_arrays1, num_iters=5, temperature=temperature)
bp_arrays2 = bp2.init(
evidence_updates=evidence_updates2, ftov_msgs_updates=ftov_msgs_updates2
)
bp_arrays2 = bp2.run(bp_arrays2, num_iters=5, temperature=temperature)
# Get beliefs
beliefs1 = bp1.get_beliefs(bp_arrays1)
beliefs2 = bp2.get_beliefs(bp_arrays2)
assert np.allclose(
beliefs1[children_variables1], beliefs2[children_variables2], atol=atol
)
assert np.allclose(
beliefs1[parents_variables1], beliefs2[parents_variables2], atol=atol
)
# Get the map states and compare their energies
map_states1 = infer.decode_map_states(beliefs1)
map_states2 = infer.decode_map_states(beliefs2)
energy_decoding1 = infer.compute_energy(
fg1.bp_state, bp_arrays1, map_states1
)[0]
energy_decoding2 = infer.compute_energy(
fg2.bp_state, bp_arrays2, map_states2
)[0]
energy_decoding1_debug, var_energies1, factor_energies1 = (
infer.compute_energy(
fg1.bp_state, bp_arrays1, map_states1, debug_mode=True
)
)
energy_decoding2_debug, var_energies2, factor_energies2 = (
infer.compute_energy(
fg2.bp_state, bp_arrays2, map_states2, debug_mode=True
)
)
assert np.allclose(energy_decoding1, energy_decoding2, atol=atol)
assert np.allclose(energy_decoding1, energy_decoding1_debug, atol=atol)
assert np.allclose(energy_decoding2, energy_decoding2_debug, atol=atol)
# Also compare the energy of all the individual variables and factors
for child_idx in range(num_factors):
var_energy1 = var_energies1[children_variables1[child_idx]]
var_energy2 = var_energies2[children_variables2[child_idx]]
assert np.allclose(var_energy1, var_energy2, atol=atol)
for parent_idx in range(num_parents_cumsum[-1]):
var_energy1 = var_energies1[parents_variables1[parent_idx]]
var_energy2 = var_energies2[parents_variables2[parent_idx]]
assert np.allclose(var_energy1, var_energy2, atol=atol)
for factor_idx in range(num_factors):
factor_energy1 = factor_energies1[
frozenset(variables_for_factors1[factor_idx])
]
factor_energy2 = factor_energies2[
frozenset(variables_for_factors2[factor_idx])
]
assert np.allclose(factor_energy1, factor_energy2, atol=atol)
| PGMax-main | tests/factor/test_and.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test building factor graphs and running inference with Pool factors."""
import jax
import numpy as np
from pgmax import factor
from pgmax import fgraph
from pgmax import fgroup
from pgmax import infer
from pgmax import vgroup
ATOL = 1e-5
# pylint: disable=invalid-name
def test_run_bp_with_PoolFactors():
"""Test building factor graphs and running inference with pool factors.
Simultaneously test
(1) the support of PoolFactors in a FactorGraph and their specialized
inference for different temperatures
(2) the support of several factor types in a FactorGraph and during
inference
(3) the computation of the energy in standard and debug mode
To do so, observe that a PoolFactor can be defined as an equivalent
  EnumFactor (which lists all the valid Pool configurations) and define two
equivalent FactorGraphs:
FG1: first half of factors are defined as EnumFactors, second half are
defined as PoolFactors
FG2: first half of factors are defined as PoolFactors, second half are defined
as EnumFactors
Inference for the EnumFactors is run with pass_enum_fac_to_var_messages
while inference for the PoolFactors is run with pass_pool_fac_to_var_messages.
Note: for the first seed, add all the EnumFactors to FG1 and all the
PoolFactors to FG2
"""
for idx in range(15):
np.random.seed(idx)
# Parameters
num_factors = np.random.randint(10, 20)
num_pool_choices = np.random.randint(1, 10, num_factors)
num_pool_choices_cumsum = np.insert(np.cumsum(num_pool_choices), 0, 0)
# Setting the temperature
if idx % 3 == 0:
# Max-product
temperature = 0.0
elif idx % 3 == 1:
      # Low temperatures are a harder test for stable updates
temperature = 0.001
else:
temperature = np.random.uniform(low=0.001, high=1.0)
    # We create different variables for the 2 FactorGraphs even though
    # we could use the same variables for both graphs
# Graph 1
pool_indicators_variables1 = vgroup.NDVarArray(
num_states=2, shape=(num_factors,)
)
pool_choices_variables1 = vgroup.NDVarArray(
num_states=2, shape=(num_pool_choices.sum(),)
)
fg1 = fgraph.FactorGraph(
variable_groups=[pool_indicators_variables1, pool_choices_variables1]
)
# Graph 2
pool_indicators_variables2 = vgroup.NDVarArray(
num_states=2, shape=(num_factors,)
)
pool_choices_variables2 = vgroup.NDVarArray(
num_states=2, shape=(num_pool_choices.sum(),)
)
fg2 = fgraph.FactorGraph(
variable_groups=[pool_indicators_variables2, pool_choices_variables2]
)
# Variable names for factors
variables_for_factors1 = []
variables_for_factors2 = []
for factor_idx in range(num_factors):
variables1 = []
# Pool choices variables must be added first
for idx1 in range(
num_pool_choices_cumsum[factor_idx],
num_pool_choices_cumsum[factor_idx + 1],
):
variables1.append(pool_choices_variables1[idx1])
variables1.append(pool_indicators_variables1[factor_idx])
variables_for_factors1.append(variables1)
variables2 = []
for idx2 in range(
num_pool_choices_cumsum[factor_idx],
num_pool_choices_cumsum[factor_idx + 1],
):
variables2.append(pool_choices_variables2[idx2])
variables2.append(pool_indicators_variables2[factor_idx])
variables_for_factors2.append(variables2)
# Option 1: Define EnumFactors equivalent to the PoolFactors
for factor_idx in range(num_factors):
this_num_pool_choices = num_pool_choices[factor_idx]
valid_configs = np.zeros(
(this_num_pool_choices + 1, this_num_pool_choices + 1), dtype=int
)
valid_configs[1:, -1] = 1
valid_configs[1:, :-1] = np.eye(this_num_pool_choices)
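      # For instance, with 2 pool choices the 3 valid configurations
      # (choice_1, choice_2, indicator) are:
      # [0, 0, 0], [1, 0, 1], [0, 1, 1]
      # i.e. the pool is either fully OFF, or the indicator is ON together with
      # exactly one choice.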
if factor_idx < num_factors // 2:
# Add the first half of factors to FactorGraph1
enum_factor = factor.EnumFactor(
variables=variables_for_factors1[factor_idx],
factor_configs=valid_configs,
log_potentials=np.zeros(valid_configs.shape[0]),
)
fg1.add_factors(enum_factor)
else:
if idx != 0:
# Add the second half of factors to FactorGraph2
enum_factor = factor.EnumFactor(
variables=variables_for_factors2[factor_idx],
factor_configs=valid_configs,
log_potentials=np.zeros(valid_configs.shape[0]),
)
fg2.add_factors(enum_factor)
else:
# Add all the EnumFactors to FactorGraph1 for the first iter
enum_factor = factor.EnumFactor(
variables=variables_for_factors1[factor_idx],
factor_configs=valid_configs,
log_potentials=np.zeros(valid_configs.shape[0]),
)
fg1.add_factors(enum_factor)
# Option 2: Define the PoolFactors
variables_for_PoolFactors_fg1 = []
variables_for_PoolFactors_fg2 = []
for factor_idx in range(num_factors):
if factor_idx < num_factors // 2:
# Add the first half of factors to FactorGraph2
variables_for_PoolFactors_fg2.append(variables_for_factors2[factor_idx])
else:
if idx != 0:
# Add the second half of factors to FactorGraph1
variables_for_PoolFactors_fg1.append(
variables_for_factors1[factor_idx]
)
else:
# Add all the PoolFactors to FactorGraph2 for the first iter
variables_for_PoolFactors_fg2.append(
variables_for_factors2[factor_idx]
)
if idx != 0:
factor_group = fgroup.PoolFactorGroup(variables_for_PoolFactors_fg1)
fg1.add_factors(factor_group)
factor_group = fgroup.PoolFactorGroup(variables_for_PoolFactors_fg2)
fg2.add_factors(factor_group)
    # Set up inference
bp1 = infer.build_inferer(fg1.bp_state, backend="bp")
bp2 = infer.build_inferer(fg2.bp_state, backend="bp")
evidence_pool_indicators = jax.device_put(
np.random.gumbel(size=(num_factors, 2))
)
evidence_pool_choices = jax.device_put(
np.random.gumbel(size=(sum(num_pool_choices), 2))
)
evidence_updates1 = {
pool_indicators_variables1: evidence_pool_indicators,
pool_choices_variables1: evidence_pool_choices,
}
evidence_updates2 = {
pool_indicators_variables2: evidence_pool_indicators,
pool_choices_variables2: evidence_pool_choices,
}
# Randomly initialize the messages
ftov_msgs_updates1 = {}
ftov_msgs_updates2 = {}
for idx in range(num_factors):
ftov = np.random.normal(size=(2,))
ftov_msgs_updates1[pool_indicators_variables1[idx]] = ftov
ftov_msgs_updates2[pool_indicators_variables2[idx]] = ftov
for idx in range(num_pool_choices_cumsum[-1]):
ftov = np.random.normal(size=(2,))
ftov_msgs_updates1[pool_choices_variables1[idx]] = ftov
ftov_msgs_updates2[pool_choices_variables2[idx]] = ftov
# Run BP
bp_arrays1 = bp1.init(
evidence_updates=evidence_updates1, ftov_msgs_updates=ftov_msgs_updates1
)
bp_arrays1 = bp1.run(bp_arrays1, num_iters=5, temperature=temperature)
bp_arrays2 = bp2.init(
evidence_updates=evidence_updates2, ftov_msgs_updates=ftov_msgs_updates2
)
bp_arrays2 = bp2.run(bp_arrays2, num_iters=5, temperature=temperature)
# Get beliefs
beliefs1 = bp1.get_beliefs(bp_arrays1)
beliefs2 = bp2.get_beliefs(bp_arrays2)
assert np.allclose(
beliefs1[pool_choices_variables1],
beliefs2[pool_choices_variables2],
atol=ATOL,
)
assert np.allclose(
beliefs1[pool_indicators_variables1],
beliefs2[pool_indicators_variables2],
atol=ATOL,
)
# Get the map states and compare their energies
map_states1 = infer.decode_map_states(beliefs1)
map_states2 = infer.decode_map_states(beliefs2)
energy_decoding1 = infer.compute_energy(
fg1.bp_state, bp_arrays1, map_states1
)[0]
energy_decoding2 = infer.compute_energy(
fg2.bp_state, bp_arrays2, map_states2
)[0]
energy_decoding1_debug, var_energies1, factor_energies1 = (
infer.compute_energy(
fg1.bp_state, bp_arrays1, map_states1, debug_mode=True
)
)
energy_decoding2_debug, var_energies2, factor_energies2 = (
infer.compute_energy(
fg2.bp_state, bp_arrays2, map_states2, debug_mode=True
)
)
assert np.allclose(energy_decoding1, energy_decoding2, atol=ATOL)
assert np.allclose(energy_decoding1, energy_decoding1_debug, atol=ATOL)
assert np.allclose(energy_decoding2, energy_decoding2_debug, atol=ATOL)
# Also compare the energy of all the individual variables and factors
for child_idx in range(num_factors):
var_energy1 = var_energies1[pool_indicators_variables1[child_idx]]
var_energy2 = var_energies2[pool_indicators_variables2[child_idx]]
assert np.allclose(var_energy1, var_energy2, atol=ATOL)
for parent_idx in range(num_pool_choices_cumsum[-1]):
var_energy1 = var_energies1[pool_choices_variables1[parent_idx]]
var_energy2 = var_energies2[pool_choices_variables2[parent_idx]]
assert np.allclose(var_energy1, var_energy2, atol=ATOL)
for factor_idx in range(num_factors):
factor_energy1 = factor_energies1[
frozenset(variables_for_factors1[factor_idx])
]
factor_energy2 = factor_energies2[
frozenset(variables_for_factors2[factor_idx])
]
assert np.allclose(factor_energy1, factor_energy2, atol=ATOL)
| PGMax-main | tests/factor/test_pool.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the correct implementation of the different variables groups."""
import re
import jax
import jax.numpy as jnp
import numpy as np
from pgmax import vgroup
import pytest
def test_variable_dict():
"""Test the correct implementation of variable dict."""
num_states = np.full((4,), fill_value=2)
with pytest.raises(
ValueError, match=re.escape("Expected num_states shape (3,). Got (4,).")
):
vgroup.VarDict(variable_names=tuple([0, 1, 2]), num_states=num_states)
num_states = np.full((3,), fill_value=2, dtype=np.float32)
with pytest.raises(
ValueError,
match=re.escape(
"num_states should be an integer or a NumPy array of dtype int"
),
):
vgroup.VarDict(variable_names=tuple([0, 1, 2]), num_states=num_states)
variable_dict = vgroup.VarDict(variable_names=tuple([0, 1, 2]), num_states=15)
with pytest.raises(
ValueError, match="data is referring to a non-existent variable 3"
):
variable_dict.flatten({3: np.zeros(10)})
with pytest.raises(
ValueError,
match=re.escape(
"Variable 2 expects a data array of shape (15,) or (1,). Got (10,)."
),
):
variable_dict.flatten({2: np.zeros(10)})
with pytest.raises(
ValueError, match="Can only unflatten 1D array. Got a 2D array."
):
variable_dict.unflatten(jnp.zeros((10, 20)), True)
assert jnp.all(
jnp.array(
jax.tree_util.tree_leaves(
jax.tree_util.tree_map(
lambda x, y: jnp.all(x == y),
variable_dict.unflatten(jnp.zeros(3), False),
{name: np.zeros(1) for name in range(3)},
)
)
)
)
with pytest.raises(
ValueError,
match=re.escape(
"flat_data should be shape (num_variable_states(=45),). Got (100,)"
),
):
variable_dict.unflatten(jnp.zeros((100)), True)
with pytest.raises(
ValueError,
match=re.escape(
"flat_data should be shape (num_variables(=3),). Got (100,)"
),
):
variable_dict.unflatten(jnp.zeros((100)), False)
def test_nd_variable_array():
"""Test the correct implementation of variable array."""
max_size = int(vgroup.vgroup.MAX_SIZE)
with pytest.raises(
ValueError,
match=re.escape(
f"Currently only support NDVarArray of size smaller than {max_size}."
f" Got {max_size + 1}"
),
):
vgroup.NDVarArray(shape=(max_size + 1,), num_states=2)
num_states = np.full((2, 3), fill_value=2)
with pytest.raises(
ValueError,
match=re.escape("Expected num_states shape (2, 2). Got (2, 3)."),
):
vgroup.NDVarArray(shape=(2, 2), num_states=num_states)
num_states = np.full((2, 3), fill_value=2, dtype=np.float32)
with pytest.raises(
ValueError,
match=re.escape(
"num_states should be an integer or a NumPy array of dtype int"
),
):
vgroup.NDVarArray(shape=(2, 2), num_states=num_states)
variable_group0 = vgroup.NDVarArray(shape=(5, 5), num_states=2)
assert len(variable_group0[:3, :3]) == 9
print(variable_group0)
variable_group = vgroup.NDVarArray(
shape=(2, 2), num_states=np.array([[1, 2], [3, 4]])
)
print(variable_group)
if variable_group0 < variable_group:
pass
with pytest.raises(
ValueError,
match=re.escape(
"data should be of shape (2, 2) or (2, 2, 4). Got (3, 3)."
),
):
variable_group.flatten(np.zeros((3, 3)))
assert jnp.all(
variable_group.flatten(np.array([[1, 2], [3, 4]]))
== jnp.array([1, 2, 3, 4])
)
assert jnp.all(
variable_group.flatten(np.zeros((2, 2, 4))) == jnp.zeros((10,))
)
with pytest.raises(
ValueError, match="Can only unflatten 1D array. Got a 2D array."
):
variable_group.unflatten(np.zeros((10, 20)), True)
with pytest.raises(
ValueError,
match=re.escape("flat_data size should be equal to 10. Got size 12."),
):
variable_group.unflatten(np.zeros((12,)), True)
with pytest.raises(
ValueError,
match=re.escape("flat_data size should be equal to 4. Got size 12."),
):
variable_group.unflatten(np.zeros((12,)), False)
assert jnp.all(
variable_group.unflatten(np.zeros(4), False) == jnp.zeros((2, 2))
)
unflattened = jnp.full((2, 2, 4), fill_value=jnp.nan)
unflattened = unflattened.at[0, 0, 0].set(0)
unflattened = unflattened.at[0, 1, :1].set(0)
unflattened = unflattened.at[1, 0, :2].set(0)
unflattened = unflattened.at[1, 1].set(0)
mask = ~jnp.isnan(unflattened)
assert jnp.all(
variable_group.unflatten(np.zeros(10), True)[mask] == unflattened[mask]
)
def test_varray_repr():
var_array = vgroup.NDVarArray(shape=(0,), num_states=np.zeros((0,), int))
assert repr(var_array).startswith("NDVarArray(shape=(0,), num_states=[]")
var_array = vgroup.NDVarArray(shape=(2,), num_states=np.array([2, 2]))
assert repr(var_array).startswith("NDVarArray(shape=(2,), num_states=2")
var_array = vgroup.NDVarArray(shape=(2,), num_states=np.array([2, 3]))
assert repr(var_array).startswith(
"NDVarArray(shape=(2,), min_num_states=2, max_num_states=3"
)
| PGMax-main | tests/vgroup/test_vgroup.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the correct implementation of a factor graph."""
import dataclasses
import re
import jax
import jax.numpy as jnp
import numpy as np
from pgmax import factor
from pgmax import fgraph
from pgmax import fgroup
from pgmax import infer
from pgmax import vgroup
import pytest
def test_factor_graph():
"""Test the correct implementation of a factor graph."""
vg = vgroup.VarDict(variable_names=(0,), num_states=15)
fg = fgraph.FactorGraph(vg)
wrong_enum_factor = factor.EnumFactor(
variables=[vg[0], vg[0]],
factor_configs=np.array([[idx, idx] for idx in range(15)]),
log_potentials=np.zeros(15),
)
with pytest.raises(
ValueError,
match=re.escape(
f"A Factor of type {factor.EnumFactor} involving variables"
f" {[(vg.__hash__(), 15), (vg.__hash__(), 15)]} contains variables"
" duplicates."
),
):
fg.add_factors(wrong_enum_factor)
enum_factor = factor.EnumFactor(
variables=[vg[0]],
factor_configs=np.arange(15)[:, None],
log_potentials=np.zeros(15),
)
fg.add_factors(enum_factor)
factor_group = fgroup.EnumFactorGroup(
variables_for_factors=[[vg[0]]],
factor_configs=np.arange(15)[:, None],
log_potentials=jnp.zeros(15),
)
with pytest.raises(
ValueError,
match=re.escape(
f"A Factor of type {factor.EnumFactor} involving variables"
f" {frozenset([(vg.__hash__(), 15)])} already exists."
),
):
fg.add_factors(factor_group)
def test_bp_state():
"""Test the correct implementation of a Belief Propagation state."""
vg = vgroup.VarDict(variable_names=(0,), num_states=15)
fg0 = fgraph.FactorGraph(vg)
enum_factor = factor.EnumFactor(
variables=[vg[0]],
factor_configs=np.arange(15)[:, None],
log_potentials=np.zeros(15),
)
fg0.add_factors(enum_factor)
fg1 = fgraph.FactorGraph(vg)
fg1.add_factors(enum_factor)
with pytest.raises(
ValueError,
match=(
"log_potentials, ftov_msgs and evidence should be derived from the"
" same fg_state"
),
):
infer.BPState(
log_potentials=fg0.bp_state.log_potentials,
ftov_msgs=fg1.bp_state.ftov_msgs,
evidence=fg1.bp_state.evidence,
)
def test_log_potentials():
"""Test the correct implementation of log potentials."""
vg = vgroup.VarDict(variable_names=(0,), num_states=15)
fg = fgraph.FactorGraph(vg)
factor_group = fgroup.EnumFactorGroup(
variables_for_factors=[[vg[0]]],
factor_configs=np.arange(10)[:, None],
)
fg.add_factors(factor_group)
with pytest.raises(
ValueError,
match=re.escape("Expected log potentials shape (10,) for factor group."),
):
fg.bp_state.log_potentials[factor_group] = jnp.zeros((1, 15))
with pytest.raises(
ValueError,
match=re.escape("Invalid FactorGroup for log potentials updates."),
):
factor_group2 = fgroup.EnumFactorGroup(
variables_for_factors=[[vg[0]]],
factor_configs=np.arange(10)[:, None],
)
fg.bp_state.log_potentials[factor_group2] = jnp.zeros((1, 15))
with pytest.raises(
ValueError,
match=re.escape("Invalid FactorGroup queried to access log potentials."),
):
_ = fg.bp_state.log_potentials[vg[0]]
with pytest.raises(
ValueError,
match=re.escape("Expected log potentials shape (10,). Got (15,)"),
):
infer.LogPotentials(fg_state=fg.fg_state, value=np.zeros(15))
log_potentials = infer.LogPotentials(
fg_state=fg.fg_state, value=np.zeros((10,))
)
assert jnp.all(log_potentials[factor_group] == jnp.zeros((10,)))
def test_ftov_msgs():
"""Test the correct implementation of ftov messages."""
vg = vgroup.VarDict(variable_names=(0,), num_states=15)
fg = fgraph.FactorGraph(vg)
factor_group = fgroup.EnumFactorGroup(
variables_for_factors=[[vg[0]]],
factor_configs=np.arange(10)[:, None],
)
fg.add_factors(factor_group)
with pytest.raises(
ValueError,
match=re.escape(
"Provided variable or factor type is not in the FactorGraph"
),
):
fg.bp_state.ftov_msgs[0] = np.ones(10)
with pytest.raises(
ValueError,
match=re.escape(
f"Expected ftov_msgs shape (15,) for variable ({vg.__hash__()}, 15)."
" Got incompatible shape (10,)."
),
):
fg.bp_state.ftov_msgs[vg[0]] = np.ones(10)
with pytest.raises(
ValueError, match=re.escape("Expected messages shape (15,). Got (10,)")
):
infer.FToVMessages(fg_state=fg.fg_state, value=np.zeros(10))
ftov_msgs = infer.FToVMessages(fg_state=fg.fg_state, value=np.zeros(15))
with pytest.raises(
TypeError, match=re.escape("'FToVMessages' object is not subscriptable")
):
_ = ftov_msgs[(10,)]
with pytest.raises(
ValueError,
match=re.escape(
f"Expected ftov_msgs shape (15,) for factor type {factor.EnumFactor}."
" Got incompatible shape (10,)."
),
):
infer.bp_state.update_ftov_msgs(
jax.device_put(ftov_msgs.value),
{factor.EnumFactor: jax.device_put(np.zeros(10))},
fg.fg_state,
)
def test_evidence():
"""Test the correct implementation of evidence."""
vg = vgroup.VarDict(variable_names=(0, 1), num_states=15)
fg = fgraph.FactorGraph(vg)
factor_group = fgroup.EnumFactorGroup(
variables_for_factors=[[vg[0], vg[1]]],
      factor_configs=np.arange(10)[:, None] + np.zeros((1, 2), dtype=int),
)
fg.add_factors(factor_group)
evidence = infer.Evidence(
fg_state=fg.fg_state,
value=np.zeros((30,)),
)
assert jnp.all(evidence.value == jnp.zeros((30,)))
with pytest.raises(
ValueError, match=re.escape("Expected evidence shape (30,). Got (20,).")
):
infer.Evidence(
fg_state=fg.fg_state,
value=np.zeros(
20,
),
)
with pytest.raises(
ValueError,
match=re.escape(
f"Expected evidence shape (15,) for variable {vg[0]}."
" Got incompatible shape (10,)."
),
):
infer.bp_state.update_evidence(
jax.device_put(evidence.value),
{vg[0]: jax.device_put(np.zeros(10))},
fg.fg_state,
)
vg2 = vgroup.VarDict(variable_names=(0,), num_states=15)
with pytest.raises(
ValueError,
match=re.escape(
"Got evidence for a variable or a VarGroup not in the FactorGraph!"
),
):
infer.bp_state.update_evidence(
jax.device_put(evidence.value),
{vg2[0]: jax.device_put(np.zeros(15))},
fg.fg_state,
)
def test_bp():
"""Test running belief propagation with the old and new interfaces."""
vg = vgroup.VarDict(variable_names=(0,), num_states=15)
fg = fgraph.FactorGraph(vg)
factor_group = fgroup.EnumFactorGroup(
variables_for_factors=[[vg[0]]],
factor_configs=np.arange(10)[:, None],
)
fg.add_factors(factor_group)
vdict_evidence = {
var: np.random.gumbel(size=(var[1],)) for var in vg.variables
}
bp = infer.build_inferer(fg.bp_state, backend="bp")
init_bp_arrays = bp.update()
init_bp_arrays = bp.update(
bp_arrays=init_bp_arrays,
log_potentials_updates={factor_group: np.ones(10)},
evidence_updates=vdict_evidence,
)
bp_arrays = bp.run(init_bp_arrays, num_iters=10, temperature=1.0)
# Test the old interface
old_bp = infer.BP(fg.bp_state, temperature=1.0)
old_bp_arrays = old_bp.run_bp(init_bp_arrays, num_iters=10)
assert np.allclose(bp_arrays.ftov_msgs, old_bp_arrays.ftov_msgs)
bp_arrays = dataclasses.replace(bp_arrays, log_potentials=jnp.zeros((10,)))
bp_state = bp.to_bp_state(bp_arrays)
assert bp_state.fg_state == fg.fg_state
beliefs = bp.get_beliefs(bp_arrays)
assert beliefs[vg][0].shape == (15,)
def test_bp_different_num_states():
"""Test belief propagation when variables have different number of states."""
  # FactorGraph where VarDict and NDVarArray both have distinct numbers of states
num_states = np.array([2, 3, 4])
vdict = vgroup.VarDict(
variable_names=tuple(["a", "b", "c"]), num_states=num_states
)
varray = vgroup.NDVarArray(shape=(3,), num_states=num_states)
fg = fgraph.FactorGraph([vdict, varray])
# Add factors
# We enforce the variables with same number of states to be in the same state
for var_dict, var_arr, num_state in zip(
["a", "b", "c"], [0, 1, 2], num_states
):
enum_factor = factor.EnumFactor(
variables=[vdict[var_dict], varray[var_arr]],
factor_configs=np.array([[idx, idx] for idx in range(num_state)]),
log_potentials=np.zeros(num_state),
)
fg.add_factors(enum_factor)
# BP functions
bp = infer.build_inferer(fg.bp_state, backend="bp")
# Evidence for both VarDict and NDVarArray
vdict_evidence = {
var: np.random.gumbel(size=(var[1],)) for var in vdict.variables
}
bp_arrays = bp.init(evidence_updates=vdict_evidence)
varray_evidence = {
varray: np.random.gumbel(size=(num_states.shape[0], num_states.max()))
}
bp_arrays = bp.update(bp_arrays=bp_arrays, evidence_updates=varray_evidence)
assert np.all(bp_arrays.evidence != 0)
# Run BP
bp_arrays = bp.run(bp_arrays, num_iters=50, temperature=0.0)
beliefs = bp.get_beliefs(bp_arrays)
map_states = infer.decode_map_states(beliefs)
vdict_states = map_states[vdict]
varray_states = map_states[varray]
# Verify that variables with same number of states are in the same state
for var_dict, var_arr in zip(["a", "b", "c"], [0, 1, 2]):
assert vdict_states[var_dict] == varray_states[var_arr]
| PGMax-main | tests/fgraph/test_fgraph.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the correct implementation of the different factor groups."""
import re
import jax.numpy as jnp
import numpy as np
from pgmax import fgroup
from pgmax import vgroup
import pytest
def test_single_factor():
"""Test the correct implementation of the single factor group."""
with pytest.raises(
ValueError, match="Cannot create a FactorGroup with no Factor."
):
fgroup.ORFactorGroup(variables_for_factors=[])
# pylint: disable=invalid-name
A = vgroup.NDVarArray(num_states=2, shape=(10,))
B = vgroup.NDVarArray(num_states=2, shape=(10,))
variables0 = (A[0], B[0])
variables1 = (A[1], B[1])
ORFactor0 = fgroup.ORFactorGroup(variables_for_factors=[variables0])
with pytest.raises(
ValueError,
match="SingleFactorGroup should only contain one factor. Got 2",
):
fgroup.SingleFactorGroup(
variables_for_factors=[variables0, variables1],
single_factor=ORFactor0,
)
ORFactor1 = fgroup.ORFactorGroup(variables_for_factors=[variables1])
if ORFactor0 < ORFactor1:
pass
def test_enumeration_factor_group():
"""Test the correct implementation of the enumeration factor group."""
vg = vgroup.NDVarArray(shape=(2, 2), num_states=3)
with pytest.raises(
ValueError,
match=re.escape(
"Expected log potentials shape: (1,) or (2, 1). Got (3, 2)"
),
):
enumeration_factor_group = fgroup.EnumFactorGroup(
variables_for_factors=[
[vg[0, 0], vg[0, 1], vg[1, 1]],
[vg[0, 1], vg[1, 0], vg[1, 1]],
],
factor_configs=np.zeros((1, 3), dtype=int),
log_potentials=np.zeros((3, 2)),
)
with pytest.raises(
ValueError, match=re.escape("Potentials should be floats")
):
enumeration_factor_group = fgroup.EnumFactorGroup(
variables_for_factors=[
[vg[0, 0], vg[0, 1], vg[1, 1]],
[vg[0, 1], vg[1, 0], vg[1, 1]],
],
factor_configs=np.zeros((1, 3), dtype=int),
log_potentials=np.zeros((2, 1), dtype=int),
)
enumeration_factor_group = fgroup.EnumFactorGroup(
variables_for_factors=[
[vg[0, 0], vg[0, 1], vg[1, 1]],
[vg[0, 1], vg[1, 0], vg[1, 1]],
],
factor_configs=np.zeros((1, 3), dtype=int),
)
name = [vg[0, 0], vg[1, 1]]
with pytest.raises(
ValueError,
match=re.escape(
"The queried factor connected to the set of variables"
f" {frozenset(name)} is not present in the factor group."
),
):
_ = enumeration_factor_group[name]
assert (
enumeration_factor_group[[vg[0, 1], vg[1, 0], vg[1, 1]]]
== enumeration_factor_group.factors[1]
)
with pytest.raises(
ValueError,
match=re.escape(
"data should be of shape (2, 1) or (2, 9) or (1,). Got (4, 5)."
),
):
enumeration_factor_group.flatten(np.zeros((4, 5)))
assert jnp.all(enumeration_factor_group.flatten(np.ones(1)) == jnp.ones(2))
assert jnp.all(
enumeration_factor_group.flatten(np.ones((2, 9))) == jnp.ones(18)
)
with pytest.raises(
ValueError,
match=re.escape("Can only unflatten 1D array. Got a 3D array."),
):
enumeration_factor_group.unflatten(jnp.ones((1, 2, 3)))
with pytest.raises(
ValueError,
match=re.escape(
"flat_data should be compatible with shape (2, 1) or (2, 9). Got"
" (30,)"
),
):
enumeration_factor_group.unflatten(jnp.zeros(30))
assert jnp.all(
enumeration_factor_group.unflatten(jnp.arange(2)) == jnp.array([[0], [1]])
)
assert jnp.all(
enumeration_factor_group.unflatten(jnp.ones(18)) == jnp.ones((2, 9))
)
def test_pairwise_factor_group():
"""Test the correct implementation of the pairwise factor group."""
vg = vgroup.NDVarArray(shape=(2, 2), num_states=3)
with pytest.raises(
ValueError,
match=re.escape("log_potential_matrix should be either a 2D array"),
):
fgroup.PairwiseFactorGroup(
[[vg[0, 0], vg[1, 1]]], np.zeros((1,), dtype=float)
)
with pytest.raises(
ValueError, match=re.escape("Potential matrix should be floats")
):
fgroup.PairwiseFactorGroup(
[[vg[0, 0], vg[1, 1]]], np.zeros((3, 3), dtype=int)
)
with pytest.raises(
ValueError,
match=re.escape(
"Expected log_potential_matrix for 1 factors. Got"
" log_potential_matrix for 2 factors."
),
):
fgroup.PairwiseFactorGroup(
[[vg[0, 0], vg[1, 1]]], np.zeros((2, 3, 3), dtype=float)
)
with pytest.raises(
ValueError,
match=re.escape(
"All pairwise factors should connect to exactly 2 variables. Got a"
" factor connecting to 3 variables"
),
):
fgroup.PairwiseFactorGroup(
[[vg[0, 0], vg[1, 1], vg[0, 1]]], np.zeros((3, 3), dtype=float)
)
name = [vg[0, 0], vg[1, 1]]
with pytest.raises(
ValueError,
match=re.escape(f"The specified pairwise factor {name}"),
):
fgroup.PairwiseFactorGroup([name], np.zeros((4, 4), dtype=float))
pairwise_factor_group = fgroup.PairwiseFactorGroup(
[[vg[0, 0], vg[1, 1]], [vg[1, 0], vg[0, 1]]],
)
with pytest.raises(
ValueError,
match=re.escape(
"data should be of shape (2, 3, 3) or (2, 6) or (3, 3). Got (4, 4)."
),
):
pairwise_factor_group.flatten(np.zeros((4, 4)))
assert jnp.all(
pairwise_factor_group.flatten(np.zeros((3, 3))) == jnp.zeros(2 * 3 * 3)
)
assert jnp.all(
pairwise_factor_group.flatten(np.zeros((2, 6))) == jnp.zeros(12)
)
with pytest.raises(
ValueError, match="Can only unflatten 1D array. Got a 2D array"
):
pairwise_factor_group.unflatten(np.zeros((10, 20)))
assert jnp.all(
pairwise_factor_group.unflatten(np.zeros(2 * 3 * 3))
== jnp.zeros((2, 3, 3))
)
assert jnp.all(
pairwise_factor_group.unflatten(np.zeros(2 * 6)) == jnp.zeros((2, 6))
)
with pytest.raises(
ValueError,
match=re.escape(
"flat_data should be compatible with shape (2, 3, 3) or (2, 6). Got"
" (10,)."
),
):
pairwise_factor_group.unflatten(np.zeros(10))
| PGMax-main | tests/fgroup/test_fgroup.py |
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name
"""Test the equivalence of the wiring compiled in equivalent FactorGraphs."""
import re
import numpy as np
from pgmax import factor
from pgmax import fgraph
from pgmax import fgroup
from pgmax import vgroup
import pytest
def test_wiring_with_PairwiseFactorGroup():
"""Test the equivalence of the wiring compiled at the PairwiseFactorGroup level vs at the individual EnumFactor level."""
A = vgroup.NDVarArray(num_states=2, shape=(10,))
B = vgroup.NDVarArray(num_states=2, shape=(10,))
# Test that compile_wiring enforces the correct factor_edges_num_states shape
fg = fgraph.FactorGraph(variable_groups=[A, B])
factor_group = fgroup.PairwiseFactorGroup(
variables_for_factors=[[A[idx], B[idx]] for idx in range(10)]
)
fg.add_factors(factor_group)
factor_group = fg.factor_groups[factor.EnumFactor][0]
object.__setattr__(
factor_group, "factor_configs", factor_group.factor_configs[:, :1]
)
with pytest.raises(
ValueError,
match=re.escape(
"Expected factor_edges_num_states shape is (10,). Got (20,)."
),
):
factor_group.compile_wiring(
fg._vars_to_starts # pylint: disable=protected-access
)
# FactorGraph with a single PairwiseFactorGroup
fg1 = fgraph.FactorGraph(variable_groups=[A, B])
factor_group = fgroup.PairwiseFactorGroup(
variables_for_factors=[[A[idx], B[idx]] for idx in range(10)]
)
fg1.add_factors(factor_group)
assert len(fg1.factor_groups[factor.EnumFactor]) == 1
# FactorGraph with multiple PairwiseFactorGroup
fg2 = fgraph.FactorGraph(variable_groups=[A, B])
for idx in range(10):
factor_group = fgroup.PairwiseFactorGroup(
variables_for_factors=[[A[idx], B[idx]]]
)
fg2.add_factors(factor_group)
assert len(fg2.factor_groups[factor.EnumFactor]) == 10
# FactorGraph with multiple SingleFactorGroup
fg3 = fgraph.FactorGraph(variable_groups=[A, B])
factors = []
for idx in range(10):
enum_factor = factor.EnumFactor(
variables=[A[idx], B[idx]],
factor_configs=np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
log_potentials=np.zeros((4,)),
)
factors.append(enum_factor)
fg3.add_factors(factors)
assert len(fg3.factor_groups[factor.EnumFactor]) == 10
assert len(fg1.factors) == len(fg2.factors) == len(fg3.factors)
# Compile wiring via factor_group.compile_wiring
wiring1 = fg1.wiring[factor.EnumFactor]
wiring2 = fg2.wiring[factor.EnumFactor]
# Compile wiring via factor.compile_wiring
wiring3 = fg3.wiring[factor.EnumFactor]
assert np.all(wiring1.var_states_for_edges == wiring2.var_states_for_edges)
assert np.all(
wiring1.factor_configs_edge_states == wiring2.factor_configs_edge_states
)
assert np.all(wiring1.var_states_for_edges == wiring3.var_states_for_edges)
assert np.all(
wiring1.factor_configs_edge_states == wiring3.factor_configs_edge_states
)
def test_wiring_with_ORFactorGroup():
"""Test the equivalence of the wiring compiled at the ORFactorGroup level vs at the individual ORFactor level."""
A = vgroup.NDVarArray(num_states=2, shape=(10,))
B = vgroup.NDVarArray(num_states=2, shape=(10,))
C = vgroup.NDVarArray(num_states=2, shape=(10,))
# FactorGraph with a single ORFactorGroup
fg1 = fgraph.FactorGraph(variable_groups=[A, B, C])
factor_group = fgroup.ORFactorGroup(
variables_for_factors=[[A[idx], B[idx], C[idx]] for idx in range(10)]
)
fg1.add_factors(factor_group)
assert len(fg1.factor_groups[factor.ORFactor]) == 1
# FactorGraph with multiple ORFactorGroup
fg2 = fgraph.FactorGraph(variable_groups=[A, B, C])
for idx in range(5):
factor_group = fgroup.ORFactorGroup(
variables_for_factors=[
[A[2 * idx], B[2 * idx], C[2 * idx]],
[A[2 * idx + 1], B[2 * idx + 1], C[2 * idx + 1]],
],
)
fg2.add_factors(factor_group)
assert len(fg2.factor_groups[factor.ORFactor]) == 5
# FactorGraph with multiple SingleFactorGroup
fg3 = fgraph.FactorGraph(variable_groups=[A, B, C])
for idx in range(10):
or_factor = factor.ORFactor(
variables=[A[idx], B[idx], C[idx]],
)
fg3.add_factors(or_factor)
assert len(fg3.factor_groups[factor.ORFactor]) == 10
assert len(fg1.factors) == len(fg2.factors) == len(fg3.factors)
# Compile wiring via factor_group.compile_wiring
wiring1 = fg1.wiring[factor.ORFactor]
wiring2 = fg2.wiring[factor.ORFactor]
# Compile wiring via factor.compile_wiring
wiring3 = fg3.wiring[factor.ORFactor]
assert np.all(wiring1.var_states_for_edges == wiring2.var_states_for_edges)
assert np.all(wiring1.parents_edge_states == wiring2.parents_edge_states)
assert np.all(wiring1.children_edge_states == wiring2.children_edge_states)
assert np.all(wiring1.var_states_for_edges == wiring3.var_states_for_edges)
assert np.all(wiring1.parents_edge_states == wiring3.parents_edge_states)
assert np.all(wiring1.children_edge_states == wiring3.children_edge_states)
def test_wiring_with_ANDFactorGroup():
"""Test the equivalence of the wiring compiled at the ANDFactorGroup level vs at the individual ANDFactor level."""
A = vgroup.NDVarArray(num_states=2, shape=(10,))
B = vgroup.NDVarArray(num_states=2, shape=(10,))
C = vgroup.NDVarArray(num_states=2, shape=(10,))
# FactorGraph with a single ANDFactorGroup
fg1 = fgraph.FactorGraph(variable_groups=[A, B, C])
factor_group = fgroup.ANDFactorGroup(
variables_for_factors=[[A[idx], B[idx], C[idx]] for idx in range(10)],
)
fg1.add_factors(factor_group)
assert len(fg1.factor_groups[factor.ANDFactor]) == 1
# FactorGraph with multiple ANDFactorGroup
fg2 = fgraph.FactorGraph(variable_groups=[A, B, C])
for idx in range(5):
factor_group = fgroup.ANDFactorGroup(
variables_for_factors=[
[A[2 * idx], B[2 * idx], C[2 * idx]],
[A[2 * idx + 1], B[2 * idx + 1], C[2 * idx + 1]],
],
)
fg2.add_factors(factor_group)
assert len(fg2.factor_groups[factor.ANDFactor]) == 5
# FactorGraph with multiple SingleFactorGroup
fg3 = fgraph.FactorGraph(variable_groups=[A, B, C])
for idx in range(10):
and_factor = factor.ANDFactor(
variables=[A[idx], B[idx], C[idx]],
)
fg3.add_factors(and_factor)
assert len(fg3.factor_groups[factor.ANDFactor]) == 10
assert len(fg1.factors) == len(fg2.factors) == len(fg3.factors)
# Compile wiring via factor_group.compile_wiring
wiring1 = fg1.wiring[factor.ANDFactor]
wiring2 = fg2.wiring[factor.ANDFactor]
# Compile wiring via factor.compile_wiring
wiring3 = fg3.wiring[factor.ANDFactor]
assert np.all(wiring1.var_states_for_edges == wiring2.var_states_for_edges)
assert np.all(wiring1.parents_edge_states == wiring2.parents_edge_states)
assert np.all(wiring1.children_edge_states == wiring2.children_edge_states)
assert np.all(wiring1.var_states_for_edges == wiring3.var_states_for_edges)
assert np.all(wiring1.parents_edge_states == wiring3.parents_edge_states)
assert np.all(wiring1.children_edge_states == wiring3.children_edge_states)
def test_wiring_with_PoolFactorGroup():
"""Test the equivalence of the wiring compiled at the PoolFactorGroup level vs at the individual PoolFactor level."""
A = vgroup.NDVarArray(num_states=2, shape=(10,))
B = vgroup.NDVarArray(num_states=2, shape=(10,))
C = vgroup.NDVarArray(num_states=2, shape=(10,))
# FactorGraph with a single PoolFactorGroup
fg1 = fgraph.FactorGraph(variable_groups=[A, B, C])
factor_group = fgroup.PoolFactorGroup(
variables_for_factors=[[A[idx], B[idx], C[idx]] for idx in range(10)],
)
fg1.add_factors(factor_group)
assert len(fg1.factor_groups[factor.PoolFactor]) == 1
# FactorGraph with multiple PoolFactorGroup
fg2 = fgraph.FactorGraph(variable_groups=[A, B, C])
for idx in range(5):
factor_group = fgroup.PoolFactorGroup(
variables_for_factors=[
[A[2 * idx], B[2 * idx], C[2 * idx]],
[A[2 * idx + 1], B[2 * idx + 1], C[2 * idx + 1]],
],
)
fg2.add_factors(factor_group)
assert len(fg2.factor_groups[factor.PoolFactor]) == 5
# FactorGraph with multiple SingleFactorGroup
fg3 = fgraph.FactorGraph(variable_groups=[A, B, C])
for idx in range(10):
pool_factor = factor.PoolFactor(
variables=[A[idx], B[idx], C[idx]],
)
fg3.add_factors(pool_factor)
assert len(fg3.factor_groups[factor.PoolFactor]) == 10
assert len(fg1.factors) == len(fg2.factors) == len(fg3.factors)
# Compile wiring via factor_group.compile_wiring
wiring1 = fg1.wiring[factor.PoolFactor]
wiring2 = fg2.wiring[factor.PoolFactor]
# Compile wiring via factor.compile_wiring
wiring3 = fg3.wiring[factor.PoolFactor]
assert np.all(wiring1.var_states_for_edges == wiring2.var_states_for_edges)
assert np.all(
wiring1.pool_indicators_edge_states == wiring2.pool_indicators_edge_states
)
assert np.all(
wiring1.pool_choices_edge_states == wiring2.pool_choices_edge_states
)
assert np.all(wiring1.var_states_for_edges == wiring3.var_states_for_edges)
assert np.all(
wiring1.pool_indicators_edge_states == wiring3.pool_indicators_edge_states
)
assert np.all(
wiring1.pool_choices_edge_states == wiring3.pool_choices_edge_states
)
| PGMax-main | tests/fgroup/test_wiring.py |
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for the Sphinx documentation builder."""
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.append(os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "PGMax"
author = (
"Guangyao Zhou, Antoine Dedieu, Nishanth Kumar, Miguel Lazaro-Gredilla,"
" Shrinu Kushagra, Dileep George"
)
copyright = ( # pylint: disable=redefined-builtin
"2022 Intrinsic Innovation LLC, 2023 DeepMind Technologies Limited"
)
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.autosummary",
]
autosummary_generate = True # Turn on sphinx.ext.autosummary
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [] # type: ignore
master_doc = "index"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
| PGMax-main | docs/conf.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A container package for the entire PGMax library."""
__version__ = "0.6.1"
| PGMax-main | pgmax/__init__.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module containing helper functions."""
import functools
from typing import Any, Callable
import jax.numpy as jnp
# A large negative value used to clip message assignments.
# It can be useful to limit this for gradient/numerical stability
# in small models but it can cause max product to fail to find
# a globally optimal solution.
MSG_NEG_INF = -1e32
# A large absolute value used to clip log potentials at runtime, as inf
# log potentials can result in NaN results during message normalization
# You probably want LOG_POTENTIAL_MAX_ABS * MSG_NEG_INF to be finite.
# Doesn't apply in computing the energy of a decoding by default.
LOG_POTENTIAL_MAX_ABS = 1e6
# What value to use as a "base" value for, e.g., log potentials of specific
# configs. If NEG_INF is too close to MSG_NEG_INF, it'll blow up as unlikely
# configs become extremely likely.
NEG_INF = -jnp.inf
def cached_property(func: Callable[..., Any]) -> property:
"""Customized cached property decorator.
Args:
func: Member function to be decorated
Returns:
Decorated cached property
"""
return property(functools.lru_cache(None)(func))
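# Hedged usage sketch (illustration only, not part of the PGMax API): shows how
# the cached_property decorator above can be applied. The class _Example and its
# attribute names are hypothetical and exist only for this illustration.
class _Example:
  """Toy class whose expensive value is computed once and then cached."""
  def __init__(self, n: int):
    self.n = n
  @cached_property
  def expensive_sum(self) -> int:
    # Computed on first access only; later accesses reuse the cached value.
    return sum(range(self.n))
# For instance, _Example(10).expensive_sum evaluates to 45 and is cached per instance.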
| PGMax-main | pgmax/utils/__init__.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A solver for the primal of the LP relaxation of the MAP problem using cvxpy with ECOS."""
import time
from typing import Any, Dict, Hashable, Optional, Sequence, Tuple
from absl import logging
import cvxpy as cp
import jax.numpy as jnp
import numpy as np
from pgmax import factor
from pgmax import fgraph
from pgmax import infer
from pgmax import vgroup
from pgmax.infer import bp_state as bpstate
def primal_lp_solver(
fg: fgraph.FactorGraph,
evidence_updates: Optional[Dict[Any, jnp.ndarray]] = None,
) -> Tuple[Dict[Hashable, Any], float]:
"""Solves the primal of the LP-MAP problem with the Ecos solver.
Note: There is no need to compute the wiring
Args:
fg: FactorGraph with FactorGroups
evidence_updates: Optional dictionary containing evidence updates.
Returns:
lp_vgroups_solution: Dictionary mapping each VarGroup of the FactorGraph
to its assignment in the LP-MAP solution
obj_val: Associated objective value
"""
# Update the evidence
evidence_value = bpstate.update_evidence(
fg.bp_state.evidence.value, evidence_updates, fg.bp_state.fg_state
)
evidence = infer.Evidence(fg.fg_state, evidence_value)
start = time.time()
lp_vars = {}
lp_factor_vars = {}
lp_constraints = []
lp_obj_val = 0
# Build the LP objective value and add constraints in two steps
# Step 1: loop through the variables
for variable_group in fg.variable_groups:
# VarGroups can have different sizes, which is not supported by cp.Variable
for var in variable_group.variables:
lp_vars[var] = cp.Variable(var[1])
# Add constraints
lp_constraints.append(cp.sum(lp_vars[var]) == 1)
lp_constraints.append(lp_vars[var] >= 0)
# Update the objective value
lp_obj_val += cp.sum(cp.multiply(lp_vars[var], evidence[var]))
# Step 2: loop through the different factor types
for factor_type, factor_type_groups in fg.factor_groups.items():
assert factor_type in [
factor.EnumFactor, factor.ORFactor, factor.ANDFactor, factor.PoolFactor
]
# Enumeration factors
if factor_type == factor.EnumFactor:
for factor_group in factor_type_groups:
assert (
len(factor_group.variables_for_factors)
== factor_group.log_potentials.shape[0]
)
for variables_for_factor, factor_log_potentials in zip(
factor_group.variables_for_factors, factor_group.log_potentials
):
assert (
factor_log_potentials.shape[0]
== factor_group.factor_configs.shape[0]
)
# Factor variables
lp_this_factor_vars = cp.Variable(
factor_group.factor_configs.shape[0]
)
lp_factor_vars[tuple(variables_for_factor)] = lp_this_factor_vars
# Add constraints
lp_constraints.append(lp_this_factor_vars >= 0)
# Update objective value
lp_obj_val += cp.sum(
cp.multiply(lp_this_factor_vars, factor_log_potentials)
)
# Consistency constraint
for var_idx, variable in enumerate(variables_for_factor):
for var_state in range(variable[1]):
sum_indices = np.where(
factor_group.factor_configs[:, var_idx] == var_state
)[0]
lp_constraints.append(
cp.sum(
[lp_this_factor_vars[sum_idx] for sum_idx in sum_indices]
)
== lp_vars[variable][var_state]
)
# OR factors
elif factor_type == factor.ORFactor:
for factor_group in factor_type_groups:
for variables_for_factor in factor_group.variables_for_factors:
parents_variables = variables_for_factor[:-1]
child_variable = variables_for_factor[-1]
# Add OR constraints
lp_constraints.append(
lp_vars[child_variable][1]
<= cp.sum(
[
lp_vars[parent_variable][1]
for parent_variable in parents_variables
]
)
)
for parent_variable in parents_variables:
lp_constraints.append(
lp_vars[parent_variable][1] <= lp_vars[child_variable][1]
)
# AND factors
elif factor_type == factor.ANDFactor:
for factor_group in factor_type_groups:
for variables_for_factor in factor_group.variables_for_factors:
parents_variables = variables_for_factor[:-1]
child_variable = variables_for_factor[-1]
# Add AND constraints
lp_constraints.append(
cp.sum(
[
lp_vars[parent_variable][1]
for parent_variable in parents_variables
]
)
<= lp_vars[child_variable][1] + len(parents_variables) - 1
)
for parent_variable in parents_variables:
lp_constraints.append(
lp_vars[child_variable][1] <= lp_vars[parent_variable][1]
)
# Pool factors
elif factor_type == factor.PoolFactor:
for factor_group in factor_type_groups:
for variables_for_factor in factor_group.variables_for_factors:
pool_choices = variables_for_factor[:-1]
pool_indicator = variables_for_factor[-1]
# Add Pool constraints
lp_constraints.append(
cp.sum([lp_vars[pool_choice][1] for pool_choice in pool_choices])
== lp_vars[pool_indicator][1]
)
# Call the LP solver
prob = cp.Problem(cp.Maximize(lp_obj_val), lp_constraints)
logging.info("Building the cvxpy model took %.3f s", (time.time() - start))
start = time.time()
prob.solve(solver=cp.ECOS)
logging.info("Solving the model with ECOS took %.3f s", (time.time() - start))
lp_vars_solution = {k: v.value for k, v in lp_vars.items()}
lp_vgroups_solution = unflatten_lp_vars(lp_vars_solution, fg.variable_groups)
obj_val = prob.value
return lp_vgroups_solution, obj_val
def unflatten_lp_vars(
lp_vars_solution: Dict[Tuple[int, int], np.ndarray],
variable_groups: Sequence[vgroup.VarGroup],
) -> Dict[Hashable, Any]:
"""Returns a mapping from variable groups to their LP solutions.
Args:
lp_vars_solution: Mapping from the variables to their LP solutions
variable_groups: All the variable groups in the FactorGraph.
"""
lp_vgroups_solution = {}
for variable_group in variable_groups:
flat_lp_vars_solution = np.array(
[lp_vars_solution[var] for var in variable_group.variables]
).flatten()
lp_vgroups_solution[variable_group] = variable_group.unflatten(
flat_lp_vars_solution, per_state=True
)
return lp_vgroups_solution
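# Hedged usage sketch (illustration only, not part of this module): builds a
# tiny FactorGraph with a single ORFactor and solves its LP-MAP relaxation with
# primal_lp_solver. The variable groups, the evidence values and the helper name
# _example_primal_lp_usage are hypothetical.
def _example_primal_lp_usage():
  parents = vgroup.NDVarArray(num_states=2, shape=(2,))
  child = vgroup.NDVarArray(num_states=2, shape=(1,))
  fg_example = fgraph.FactorGraph(variable_groups=[parents, child])
  # Enforce child[0] = OR(parents[0], parents[1])
  fg_example.add_factors(
      factor.ORFactor(variables=[parents[0], parents[1], child[0]])
  )
  # Evidence favoring parents[0] = 1 and parents[1] = 0
  evidence_updates = {
      parents: np.array([[0.0, 2.0], [2.0, 0.0]]),
      child: np.zeros((1, 2)),
  }
  lp_vgroups_solution, obj_val = primal_lp_solver(fg_example, evidence_updates)
  return lp_vgroups_solution, obj_val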
| PGMax-main | pgmax/utils/primal_lp.py |
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a logical factor."""
import dataclasses
import functools
from typing import Any, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union
import warnings
import jax
import jax.numpy as jnp
import numba as nb
import numpy as np
from pgmax.factor import factor
from pgmax.factor import update_utils
from pgmax.utils import NEG_INF
# Temperature threshold, under which we estimate that stability problems may
# arise and we add some extra compute to fix them
TEMPERATURE_STABILITY_THRE = 0.5
# pylint: disable=unexpected-keyword-arg
@jax.tree_util.register_pytree_node_class
@dataclasses.dataclass(frozen=True, eq=False)
class LogicalWiring(factor.Wiring):
"""Wiring for LogicalFactors.
Attributes:
parents_edge_states: Array of shape (num_parents, 2)
parents_edge_states[ii, 0] contains the global LogicalFactor index
parents_edge_states[ii, 1] contains the message index of the parent
variable's relevant state
The message index of the parent variable's other state is
parents_edge_states[ii, 1] + edge_states_offset
Both indices only take into account the LogicalFactors of the same subtype
(OR/AND) of the FactorGraph
children_edge_states: Array of shape (num_factors,)
children_edge_states[ii] contains the message index of the child
variable's relevant state
The message index of the child variable's other state is
children_edge_states[ii] + edge_states_offset
Only takes into account the LogicalFactors of the same subtype (OR/AND)
of the FactorGraph
edge_states_offset: Offset to go from a variable's relevant state to its
other state
For ORFactors the edge_states_offset is 1
For ANDFactors the edge_states_offset is -1
Raises:
ValueError: If:
      (1) There are not num_logical_factors different factor indices
(2) There is a factor index higher than num_logical_factors - 1
(3) The edge_states_offset is not 1 or -1
"""
parents_edge_states: Union[np.ndarray, jnp.ndarray]
children_edge_states: Union[np.ndarray, jnp.ndarray]
edge_states_offset: int
def __post_init__(self):
super().__post_init__()
if self.children_edge_states.shape[0] > 0:
logical_factor_indices = self.parents_edge_states[:, 0]
num_logical_factors = self.children_edge_states.shape[0]
if np.unique(logical_factor_indices).shape[0] != num_logical_factors:
raise ValueError(
f"The LogicalWiring must have {num_logical_factors} different"
" LogicalFactor indices"
)
if logical_factor_indices.max() >= num_logical_factors:
raise ValueError(
f"The highest LogicalFactor index must be {num_logical_factors - 1}"
)
if self.edge_states_offset != 1 and self.edge_states_offset != -1:
raise ValueError(
"The LogicalWiring's edge_states_offset must be 1 (for OR) and -1"
f" (for AND), but is {self.edge_states_offset}"
)
def get_inference_arguments(self) -> Dict[str, Any]:
"""Return the list of arguments to run BP with LogicalWirings."""
return {
"parents_factor_indices": self.parents_edge_states[..., 0],
"parents_msg_indices": self.parents_edge_states[..., 1],
"children_edge_states": self.children_edge_states,
"edge_states_offset": self.edge_states_offset,
}
@dataclasses.dataclass(frozen=True, eq=False)
class LogicalFactor(factor.Factor):
"""A logical OR/AND factor of the form (p1,...,pn, c) where p1,...,pn are the parents variables and c is the child variable.
Attributes:
edge_states_offset: Offset to go from a variable's relevant state to its
other state
For ORFactors the edge_states_offset is 1
For ANDFactors the edge_states_offset is -1
Raises:
ValueError: If:
(1) There are less than 2 variables
(2) The variables are not all binary
"""
log_potentials: np.ndarray = dataclasses.field(
init=False,
default_factory=lambda: np.empty((0,)),
)
edge_states_offset: int = dataclasses.field(init=False)
def __post_init__(self):
if len(self.variables) < 2:
raise ValueError(
"A LogicalFactor requires at least one parent variable and one child"
" variable"
)
if not np.all([variable[1] == 2 for variable in self.variables]):
raise ValueError("All the variables in a LogicalFactor should be binary")
@staticmethod
def concatenate_wirings(wirings: Sequence[LogicalWiring]) -> LogicalWiring:
"""Concatenate a list of LogicalWirings.
Args:
wirings: A list of LogicalWirings
Returns:
Concatenated LogicalWiring
"""
if not wirings:
return LogicalWiring(
var_states_for_edges=np.empty((0, 3), dtype=int),
parents_edge_states=np.empty((0, 2), dtype=int),
children_edge_states=np.empty((0,), dtype=int),
edge_states_offset=1,
)
concatenated_var_states_for_edges = factor.concatenate_var_states_for_edges(
[wiring.var_states_for_edges for wiring in wirings]
)
# Factors indices offsets
num_factors_cumsum = np.insert(
np.cumsum(
[wiring.parents_edge_states[-1, 0] + 1 for wiring in wirings]
),
0,
0,
)[:-1]
# Messages offsets
# Note: this is all the factor_to_msgs_starts for the LogicalFactors
num_edge_states_cumsum = np.insert(
np.cumsum(
[wiring.var_states_for_edges.shape[0] for wiring in wirings]
),
0,
0,
)[:-1]
parents_edge_states = []
children_edge_states = []
for ww, or_wiring in enumerate(wirings):
offsets = np.array(
[[num_factors_cumsum[ww], num_edge_states_cumsum[ww]]], dtype=int
)
parents_edge_states.append(or_wiring.parents_edge_states + offsets)
children_edge_states.append(
or_wiring.children_edge_states + offsets[:, 1]
)
return LogicalWiring(
var_states_for_edges=concatenated_var_states_for_edges,
parents_edge_states=np.concatenate(parents_edge_states, axis=0),
children_edge_states=np.concatenate(children_edge_states, axis=0),
edge_states_offset=wirings[0].edge_states_offset,
)
@staticmethod
def compile_wiring(
factor_edges_num_states: np.ndarray,
variables_for_factors: Sequence[List[Tuple[int, int]]],
factor_sizes: np.ndarray,
vars_to_starts: Mapping[Tuple[int, int], int],
edge_states_offset: int,
) -> LogicalWiring:
"""Compile a LogicalWiring for a LogicalFactor or a FactorGroup with LogicalFactors.
    Internally calls compile_var_states_for_edges_numba and
    _compile_logical_wiring_numba for speed.
Args:
factor_edges_num_states: An array concatenating the number of states for
the variables connected to each Factor of the FactorGroup. Each variable
will appear once for each Factor it connects to.
variables_for_factors: A tuple of tuples containing variables connected to
each Factor of the FactorGroup. Each variable will appear once for each
Factor it connects to.
factor_sizes: An array containing the different factor sizes.
vars_to_starts: A dictionary that maps variables to their global starting
indices For an n-state variable, a global start index of m means the
global indices of its n variable states are m, m + 1, ..., m + n - 1
edge_states_offset: Offset to go from a variable's relevant state to its
other state
For ORFactors the edge_states_offset is 1
For ANDFactors the edge_states_offset is -1
Returns:
The LogicalWiring
"""
# Step 1: compute var_states_for_edges
first_var_state_by_edges = []
factor_indices = []
for factor_idx, variables_for_factor in enumerate(variables_for_factors):
for variable in variables_for_factor:
first_var_state_by_edges.append(vars_to_starts[variable])
factor_indices.append(factor_idx)
first_var_state_by_edges = np.array(first_var_state_by_edges)
factor_indices = np.array(factor_indices)
# All the variables in a LogicalFactorGroup are binary
num_edges_states_cumsum = np.arange(
0, 2 * first_var_state_by_edges.shape[0] + 2, 2
)
var_states_for_edges = np.empty(
shape=(2 * first_var_state_by_edges.shape[0], 3), dtype=int
)
factor.compile_var_states_for_edges_numba(
var_states_for_edges,
num_edges_states_cumsum,
first_var_state_by_edges,
factor_indices,
)
# Step 2: compute parents_edge_states and children_edge_states
num_parents = factor_sizes - 1
num_parents_cumsum = np.insert(np.cumsum(num_parents), 0, 0)
parents_edge_states = np.empty(shape=(num_parents_cumsum[-1], 2), dtype=int)
children_edge_states = np.empty(shape=(factor_sizes.shape[0],), dtype=int)
# Relevant state differs for ANDFactors and ORFactors
relevant_state = (-edge_states_offset + 1) // 2
# Note: edges_num_states_cumsum corresponds to the factor_to_msgs_start
factor_num_edges_states_cumsum = np.insert(
np.cumsum(2 * factor_sizes), 0, 0
)
_compile_logical_wiring_numba(
parents_edge_states=parents_edge_states,
children_edge_states=children_edge_states,
num_parents=num_parents,
num_parents_cumsum=num_parents_cumsum,
factor_num_edges_states_cumsum=factor_num_edges_states_cumsum,
relevant_state=relevant_state,
)
return LogicalWiring(
var_states_for_edges=var_states_for_edges,
parents_edge_states=parents_edge_states,
children_edge_states=children_edge_states,
edge_states_offset=edge_states_offset,
)
@staticmethod
@jax.jit
def compute_energy(
edge_states_one_hot_decoding: jnp.ndarray,
parents_factor_indices: jnp.ndarray,
parents_msg_indices: jnp.ndarray,
children_edge_states: jnp.ndarray,
edge_states_offset: int,
log_potentials: Optional[jnp.ndarray] = None,
) -> float:
"""Returns the contribution to the energy of several LogicalFactors.
Args:
edge_states_one_hot_decoding: Array of shape (num_edge_states,)
Flattened array of one-hot decoding of the edge states connected to the
LogicalFactors
parents_factor_indices: Array of shape (num_parents,)
parents_factor_indices[ii] contains the global LogicalFactor index of
the parent variable's relevant state.
Only takes into account the LogicalFactors of the same subtype (OR/AND)
of the FactorGraph
parents_msg_indices: Array of shape (num_parents,)
parents_msg_indices[ii] contains the message index of the parent
variable's relevant state.
The message index of the parent variable's other state is
parents_msg_indices[ii] + edge_states_offset.
Only takes into account the LogicalFactors of the same subtype (OR/AND)
of the FactorGraph
children_edge_states: Array of shape (num_factors,)
children_edge_states[ii] contains the message index of the child
variable's relevant state
The message index of the child variable's other state is
children_edge_states[ii] + edge_states_offset
Only takes into account the LogicalFactors of the same subtype (OR/AND)
of the FactorGraph
edge_states_offset: Offset to go from a variable's relevant state to its
other state
For ORFactors the edge_states_offset is 1
For ANDFactors the edge_states_offset is -1
log_potentials: Optional array of log potentials
"""
num_factors = children_edge_states.shape[0]
# parents_edge_states[..., 1] indicates the relevant state,
# which is 0 for ORFactors and 1 for ANDFactors
# If all the parents variables of a factor are in the factor type's relevant
# state, then the child variable must be in the relevant state too
logical_parents_decoded = (
jnp.ones(shape=(num_factors,))
.at[parents_factor_indices]
.multiply(edge_states_one_hot_decoding[parents_msg_indices])
)
logical_children_decoded = edge_states_one_hot_decoding[
children_edge_states
]
energy = jnp.where(
jnp.any(logical_parents_decoded != logical_children_decoded),
jnp.inf, # invalid decoding
0.0
)
return energy
@dataclasses.dataclass(frozen=True, eq=False)
class ORFactor(LogicalFactor):
"""An OR factor of the form (p1,...,pn, c) where p1,...,pn are the parents variables and c is the child variable.
An OR factor is defined as:
F(p1, p2, ..., pn, c) = 0 <=> c = OR(p1, p2, ..., pn)
F(p1, p2, ..., pn, c) = -inf o.w.
Attributes:
edge_states_offset: Offset to go from a variable's relevant state to its
other state
For ORFactors the edge_states_offset is 1.
"""
edge_states_offset: int = dataclasses.field(init=False, default=1)
@staticmethod
def compute_factor_energy(
variables: List[Hashable],
vars_to_map_states: Dict[Hashable, Any],
**kwargs,
) -> float:
"""Returns the contribution to the energy of a single ORFactor.
Args:
variables: List of variables connected by the ORFactor
vars_to_map_states: A dictionary mapping each individual variable to
its MAP state.
**kwargs: Other parameters, not used
"""
vars_decoded_states = np.array(
[vars_to_map_states[var] for var in variables]
)
parents_decoded_states = vars_decoded_states[:-1]
child_decoded_states = vars_decoded_states[-1]
if np.any(parents_decoded_states) != child_decoded_states:
warnings.warn(
f"Invalid decoding for OR factor {variables} "
f"with parents set to {parents_decoded_states} "
f"and child set to {child_decoded_states}!"
)
factor_energy = np.inf
else:
factor_energy = 0.0
return factor_energy
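# Hedged illustration (not part of the library): evaluates the ORFactor energy
# of two hand-built decodings. The variable tuples below are hypothetical
# stand-ins for the (variable_hash, num_states) pairs PGMax uses internally.
def _example_or_factor_energy():
  variables = [("p0", 2), ("p1", 2), ("c", 2)]
  # Valid decoding: OR(1, 0) == 1, so the factor contributes zero energy
  valid = {("p0", 2): 1, ("p1", 2): 0, ("c", 2): 1}
  assert ORFactor.compute_factor_energy(variables, valid) == 0.0
  # Invalid decoding: OR(0, 0) != 1, so the energy is infinite (a warning is raised)
  invalid = {("p0", 2): 0, ("p1", 2): 0, ("c", 2): 1}
  assert np.isinf(ORFactor.compute_factor_energy(variables, invalid))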
@dataclasses.dataclass(frozen=True, eq=False)
class ANDFactor(LogicalFactor):
"""An AND factor of the form (p1,...,pn, c) where p1,...,pn are the parents variables and c is the child variable.
An AND factor is defined as:
F(p1, p2, ..., pn, c) = 0 <=> c = AND(p1, p2, ..., pn)
F(p1, p2, ..., pn, c) = -inf o.w.
Attributes:
edge_states_offset: Offset to go from a variable's relevant state to its
other state
For ANDFactors the edge_states_offset is -1.
"""
edge_states_offset: int = dataclasses.field(init=False, default=-1)
@staticmethod
def compute_factor_energy(
variables: List[Hashable],
vars_to_map_states: Dict[Hashable, Any],
**kwargs,
) -> float:
"""Returns the contribution to the energy of a single ANDFactor.
Args:
variables: List of variables connected by the ANDFactor
vars_to_map_states: A dictionary mapping each individual variable to
its MAP state.
**kwargs: Other parameters, not used
"""
vars_decoded_states = np.array(
[vars_to_map_states[var] for var in variables]
)
parents_decoded_states = vars_decoded_states[:-1]
child_decoded_states = vars_decoded_states[-1]
if np.all(parents_decoded_states) != child_decoded_states:
warnings.warn(
f"Invalid decoding for AND factor {variables} "
f"with parents set to {parents_decoded_states} "
f"and child set to {child_decoded_states}!"
)
factor_energy = np.inf
else:
factor_energy = 0.0
return factor_energy
# pylint: disable=g-doc-args
@nb.jit(parallel=False, cache=True, fastmath=True, nopython=True)
def _compile_logical_wiring_numba(
parents_edge_states: np.ndarray,
children_edge_states: np.ndarray,
num_parents: np.ndarray,
num_parents_cumsum: np.ndarray,
factor_num_edges_states_cumsum: np.ndarray,
relevant_state: int,
):
"""Fast numba computation of the parents_edge_states and children_edge_states of a LogicalWiring.
parents_edge_states and children_edge_states are updated in-place.
"""
for factor_idx in nb.prange(num_parents.shape[0]):
start_parents, end_parents = (
num_parents_cumsum[factor_idx],
num_parents_cumsum[factor_idx + 1],
)
parents_edge_states[start_parents:end_parents, 0] = factor_idx
parents_edge_states[start_parents:end_parents, 1] = np.arange(
factor_num_edges_states_cumsum[factor_idx] + relevant_state,
factor_num_edges_states_cumsum[factor_idx]
+ 2 * num_parents[factor_idx],
2,
)
children_edge_states[factor_idx] = (
factor_num_edges_states_cumsum[factor_idx]
+ 2 * num_parents[factor_idx]
+ relevant_state
)
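# Hedged illustration (not called anywhere in the library): shows the layout
# _compile_logical_wiring_numba produces for a single ORFactor with two parents
# (factor size 3). The expected values in the comments follow from the loop
# above and are assumptions of this sketch, not additional library guarantees.
def _example_logical_wiring_layout():
  num_parents = np.array([2])
  num_parents_cumsum = np.insert(np.cumsum(num_parents), 0, 0)
  factor_sizes = np.array([3])
  factor_num_edges_states_cumsum = np.insert(np.cumsum(2 * factor_sizes), 0, 0)
  parents_edge_states = np.empty((num_parents_cumsum[-1], 2), dtype=int)
  children_edge_states = np.empty((factor_sizes.shape[0],), dtype=int)
  _compile_logical_wiring_numba(
      parents_edge_states=parents_edge_states,
      children_edge_states=children_edge_states,
      num_parents=num_parents,
      num_parents_cumsum=num_parents_cumsum,
      factor_num_edges_states_cumsum=factor_num_edges_states_cumsum,
      relevant_state=0,  # 0 is the relevant state for ORFactors
  )
  # Expected: parents_edge_states == [[0, 0], [0, 2]] and
  # children_edge_states == [4]: both parents belong to factor 0 and the
  # child's relevant state comes after the 2 * 2 parent edge states.
  return parents_edge_states, children_edge_states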
# pylint: disable=unused-argument
@functools.partial(jax.jit, static_argnames=("temperature", "normalize"))
def pass_logical_fac_to_var_messages(
vtof_msgs: jnp.ndarray,
parents_factor_indices: jnp.ndarray,
parents_msg_indices: jnp.ndarray,
children_edge_states: jnp.ndarray,
edge_states_offset: int,
temperature: float,
normalize: bool,
log_potentials: Optional[jnp.ndarray] = None,
) -> jnp.ndarray:
"""Passes messages from LogicalFactors to Variables.
Args:
    vtof_msgs: Array of shape (num_edge_states,). This holds all the
      flattened messages from the variables to all the LogicalFactors.
parents_factor_indices: Array of shape (num_parents,)
parents_factor_indices[ii] contains the global LogicalFactor index of the
parent variable's relevant state.
Only takes into account the LogicalFactors of the same subtype (OR/AND)
of the FactorGraph
parents_msg_indices: Array of shape (num_parents,)
parents_msg_indices[ii] contains the message index of the parent
variable's relevant state.
The message index of the parent variable's other state is
parents_msg_indices[ii] + edge_states_offset.
Only takes into account the LogicalFactors of the same subtype (OR/AND)
of the FactorGraph
children_edge_states: Array of shape (num_factors,)
children_edge_states[ii] contains the message index of the child
variable's relevant state
The message index of the child variable's other state is
children_edge_states[ii] + edge_states_offset
Only takes into account the LogicalFactors of the same subtype (OR/AND)
of the FactorGraph
edge_states_offset: Offset to go from a variable's relevant state to its
other state
For ORFactors the edge_states_offset is 1
For ANDFactors the edge_states_offset is -1
temperature: Temperature for loopy belief propagation. 1.0 corresponds
to sum-product, 0.0 corresponds to max-product.
normalize: Whether we normalize the outgoing messages. Set to True for BP
and to False for Smooth Dual LP.
log_potentials: Optional array of log potentials
Returns:
Array of shape (num_edge_states,). This holds all the flattened
LogicalFactors of the same subtype (OR/AND) to variable messages.
Note: The updates below are derived to be mathematically stable at low
temperatures in (0, 0.1].
logaddexp_with_temp and logminusexp_with_temp are also derived to be
numerically stable at these low temperatures.
    However, we observed that deriving efficient message updates for OR/AND
    factors (with linear complexity) comes at the cost of reduced numerical
    stability in two regimes:
(1) Larger factors have higher but rarer errors
(2) Temperatures around 0.1 have higher errors than lower (and higher)
temperatures
More compute could be spent to improve the stability of these two cases.
"""
num_factors = children_edge_states.shape[0]
parents_tof_msgs_relevant = vtof_msgs[
parents_msg_indices + edge_states_offset
]
parents_tof_msgs_other = vtof_msgs[parents_msg_indices]
parents_tof_msgs_diffs = parents_tof_msgs_relevant - parents_tof_msgs_other
children_tof_msgs_relevant = vtof_msgs[
children_edge_states + edge_states_offset
]
children_tof_msgs_other = vtof_msgs[children_edge_states]
# Get the easier outgoing messages to children variables
factorsum_parents_other = (
jnp.zeros((num_factors,))
.at[parents_factor_indices]
.add(parents_tof_msgs_other)
)
children_msgs_other = factorsum_parents_other
# Get first maxes and argmaxes for incoming parents messages of each factor
(
first_parents_diffs_maxes,
first_parents_diffs_argmaxes,
) = update_utils.get_maxes_and_argmaxes(
parents_tof_msgs_diffs, parents_factor_indices, num_factors
)
# Get second maxes
second_parents_diffs_maxes = (
jnp.full(shape=(num_factors,), fill_value=NEG_INF)
.at[parents_factor_indices]
.max(parents_tof_msgs_diffs.at[first_parents_diffs_argmaxes].set(NEG_INF))
)
# Consider the max-product case separately.
# See https://arxiv.org/pdf/2111.02458.pdf, Appendix C.3
if temperature == 0.0:
# First, get the easier outgoing message to parents variables
maxes_parents_by_edges = jnp.maximum(
parents_tof_msgs_other, parents_tof_msgs_relevant
)
factorsum_maxes_parents_by_edges = (
jnp.full(shape=(num_factors,), fill_value=0.0)
.at[parents_factor_indices]
.add(maxes_parents_by_edges)
)
parents_msgs_relevant = (
factorsum_maxes_parents_by_edges[parents_factor_indices]
+ children_tof_msgs_relevant[parents_factor_indices]
- maxes_parents_by_edges
)
# Outgoing messages to children variables
min_w_zeros_first_parents_diffs_maxes = jnp.minimum(
0.0, first_parents_diffs_maxes
)
children_msgs_relevant = (
factorsum_maxes_parents_by_edges + min_w_zeros_first_parents_diffs_maxes
)
# Finally, derive the harder outgoing messages to parents variables
parents_msgs_other_option1 = (
children_tof_msgs_other[parents_factor_indices]
+ factorsum_parents_other[parents_factor_indices]
- parents_tof_msgs_other
)
parents_msgs_other_option2 = (
parents_msgs_relevant
+ min_w_zeros_first_parents_diffs_maxes[parents_factor_indices]
)
# Consider the first_parents_diffs_argmaxes separately
parents_msgs_other_option2_for_argmax = parents_msgs_relevant[
first_parents_diffs_argmaxes
] + jnp.minimum(0.0, second_parents_diffs_maxes)
parents_msgs_other_option2 = parents_msgs_other_option2.at[
first_parents_diffs_argmaxes
].set(parents_msgs_other_option2_for_argmax)
parents_msgs_other = jnp.maximum(
parents_msgs_other_option1, parents_msgs_other_option2
)
else:
# Get the easier outgoing messages to parents variables
# The steps are similar to temperature=0
logsumexp_parents_by_edges = update_utils.logaddexp_with_temp(
parents_tof_msgs_relevant, parents_tof_msgs_other, temperature
)
factorsum_logsumexp_parents_by_edges = (
jnp.full(shape=(num_factors,), fill_value=0.0)
.at[parents_factor_indices]
.add(logsumexp_parents_by_edges)
)
factorsum_logsumexp_parents_by_edges_wo_self = (
factorsum_logsumexp_parents_by_edges[parents_factor_indices]
- logsumexp_parents_by_edges
)
factorsum_parents_other_wo_self = (
factorsum_parents_other[parents_factor_indices] - parents_tof_msgs_other
)
parents_msgs_relevant = (
children_tof_msgs_relevant[parents_factor_indices]
+ factorsum_logsumexp_parents_by_edges_wo_self
)
# As before, the harder outgoing messages to children variables follow
children_msgs_relevant = update_utils.logminusexp_with_temp(
factorsum_logsumexp_parents_by_edges,
factorsum_parents_other,
temperature,
eps=1e-4
)
if temperature < TEMPERATURE_STABILITY_THRE:
# If factorsum_logsumexp_parents_by_edges and factorsum_parents_other
# are closer than the high threshold eps, logminusexp_with_temp above sets
# the outgoing messages to NEG_INF, which we replace by a lower bound.
children_msgs_relevant = jnp.maximum(
children_msgs_relevant,
update_utils.logaddexp_with_temp(
factorsum_parents_other + first_parents_diffs_maxes,
factorsum_parents_other + second_parents_diffs_maxes,
temperature
)
)
# Finally, derive the harder outgoing messages to parents variables
parents_msgs_other_option1 = (
children_tof_msgs_other[parents_factor_indices]
+ factorsum_parents_other_wo_self
)
parents_msgs_other_option2 = (
children_tof_msgs_relevant[parents_factor_indices]
+ factorsum_logsumexp_parents_by_edges_wo_self
)
parents_msgs_other_option3 = (
children_tof_msgs_relevant[parents_factor_indices]
+ factorsum_parents_other_wo_self
)
logaddexp_parents_msgs_other_option1_2 = update_utils.logaddexp_with_temp(
parents_msgs_other_option1, parents_msgs_other_option2, temperature
)
parents_msgs_other = update_utils.logminusexp_with_temp(
logaddexp_parents_msgs_other_option1_2,
parents_msgs_other_option3,
temperature,
eps=1e-4
)
if temperature < TEMPERATURE_STABILITY_THRE:
# If logaddexp_parents_msgs_other_option1_2 and parents_msgs_other_option3
# are closer than the high threshold eps, logminusexp_with_temp above sets
# the outgoing messages to NEG_INF, which we replace by a lower bound.
factorsum_parents_other_plus_max_diff_wo_self = (
factorsum_parents_other_wo_self
+ first_parents_diffs_maxes[parents_factor_indices]
)
factorsum_parents_other_plus_max_diff_wo_self = (
factorsum_parents_other_plus_max_diff_wo_self.at[
first_parents_diffs_argmaxes
].set(
factorsum_parents_other_wo_self[first_parents_diffs_argmaxes]
+ second_parents_diffs_maxes
)
)
# Note: we can always spend more compute to refine the lower bound
parents_msgs_other_lower_bound = update_utils.logaddexp_with_temp(
parents_msgs_other_option1,
children_tof_msgs_relevant[parents_factor_indices]
+ factorsum_parents_other_plus_max_diff_wo_self,
temperature,
)
parents_msgs_other = jnp.maximum(
parents_msgs_other, parents_msgs_other_lower_bound,
)
# Special case: factors with a single parent
num_parents = jnp.bincount(parents_factor_indices, length=num_factors)
first_elements = jnp.concatenate(
[jnp.zeros(1, dtype=int), jnp.cumsum(num_parents)]
)[:-1]
parents_msgs_relevant = parents_msgs_relevant.at[first_elements].set(
jnp.where(
num_parents == 1,
children_tof_msgs_relevant,
parents_msgs_relevant[first_elements],
),
)
parents_msgs_other = parents_msgs_other.at[first_elements].set(
jnp.where(
num_parents == 1,
children_tof_msgs_other,
parents_msgs_other[first_elements],
),
)
# Outgoing messages
ftov_msgs = jnp.zeros_like(vtof_msgs)
if normalize:
ftov_msgs = ftov_msgs.at[
parents_msg_indices + edge_states_offset
].set(parents_msgs_relevant - parents_msgs_other)
ftov_msgs = ftov_msgs.at[children_edge_states + edge_states_offset].set(
children_msgs_relevant - children_msgs_other
)
else:
ftov_msgs = ftov_msgs.at[
parents_msg_indices + edge_states_offset
].set(parents_msgs_relevant)
ftov_msgs = ftov_msgs.at[parents_msg_indices].set(parents_msgs_other)
ftov_msgs = ftov_msgs.at[children_edge_states + edge_states_offset].set(
children_msgs_relevant
)
ftov_msgs = ftov_msgs.at[children_edge_states].set(children_msgs_other)
return ftov_msgs
| PGMax-main | pgmax/factor/logical.py |
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sub-package defining different types of factors."""
import collections
from typing import Callable, OrderedDict, Type
import jax.numpy as jnp
from pgmax.factor import enum
from pgmax.factor import logical
from pgmax.factor import pool
from pgmax.factor.enum import EnumFactor
from pgmax.factor.enum import EnumWiring
from pgmax.factor.factor import concatenate_var_states_for_edges
from pgmax.factor.factor import Factor
from pgmax.factor.factor import Wiring
from pgmax.factor.logical import ANDFactor
from pgmax.factor.logical import LogicalFactor
from pgmax.factor.logical import LogicalWiring
from pgmax.factor.logical import ORFactor
from pgmax.factor.pool import PoolFactor
from pgmax.factor.pool import PoolWiring
FAC_TO_VAR_UPDATES: OrderedDict[Type[Factor], Callable[..., jnp.ndarray]] = (
collections.OrderedDict([
(EnumFactor, enum.pass_enum_fac_to_var_messages),
(ORFactor, logical.pass_logical_fac_to_var_messages),
(ANDFactor, logical.pass_logical_fac_to_var_messages),
(PoolFactor, pool.pass_pool_fac_to_var_messages),
])
)
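# Illustrative sketch, not part of the library: FAC_TO_VAR_UPDATES is a
# dispatch table from factor type to its message-passing update. The lookup
# below only shows the mapping; the inference loop consuming it lives in the
# rest of the library.
def _example_lookup_update_fn():
  update_fn = FAC_TO_VAR_UPDATES[ORFactor]
  assert update_fn is logical.pass_logical_fac_to_var_messages
  return update_fn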
| PGMax-main | pgmax/factor/__init__.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils function for message updates."""
import functools
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from pgmax.utils import NEG_INF
@functools.partial(jax.jit, static_argnames="num_labels")
def get_maxes_and_argmaxes(
data: jnp.array,
labels: jnp.array,
num_labels: int,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Given a flattened sequence of elements and their corresponding labels, returns the maxes and argmaxes of each label.
Args:
data: Array of shape (a_len,) where a_len is an arbitrary integer.
labels: Label array of shape (a_len,), assigning a label to each entry.
Labels must be 0,..., num_labels - 1.
num_labels: Number of different labels.
Returns:
Maxes and argmaxes arrays
Note:
To leverage the restricted list of primitives supported by JAX ndarray.at
https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndarray.at.html
    we represent the argmax of an array x of length N with max x^* as
    argmax x = max_k {k - N * 1(x[k] != x^*)}
"""
num_obs = data.shape[0]
maxes = jnp.full(shape=(num_labels,), fill_value=NEG_INF).at[labels].max(data)
only_maxes_pos = jnp.arange(num_obs) - num_obs * jnp.where(
data != maxes[labels], 1, 0
)
argmaxes = (
jnp.full(
shape=(num_labels,),
fill_value=jnp.iinfo(jnp.int32).min,
dtype=jnp.int32,
)
.at[labels]
.max(only_maxes_pos)
)
return maxes, argmaxes
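# Illustrative sketch, not part of the library: per-label maxes and argmaxes
# on a tiny made-up example with three labels.
def _example_get_maxes_and_argmaxes():
  data = jnp.array([0.5, 2.0, -1.0, 3.0, 1.0])
  labels = jnp.array([0, 0, 1, 1, 2])
  maxes, argmaxes = get_maxes_and_argmaxes(data, labels, num_labels=3)
  # maxes == [2.0, 3.0, 1.0] and argmaxes == [1, 3, 4]
  return maxes, argmaxes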
@functools.partial(jax.jit, static_argnames=("num_labels"))
def logsumexps_with_temp(
data: jnp.array,
labels: jnp.array,
num_labels: int,
temperature: float,
maxes: Optional[jnp.array] = None,
) -> jnp.ndarray:
"""Given a flattened sequence of elements and their corresponding labels, returns the stable logsumexp for each label at the given temperature.
Args:
data: Array of shape (a_len,) where a_len is an arbitrary integer.
labels: Label array of shape (a_len,), assigning a label to each entry.
Labels must be 0,..., num_labels - 1.
num_labels: Number of different labels.
temperature: Temperature for loopy belief propagation.
maxes: Optional array of precomputed maxes per label
Returns:
The array of logsumexp for each label
"""
if maxes is None:
maxes = (
jnp.full(shape=(num_labels,), fill_value=NEG_INF).at[labels].max(data)
)
logsumexps_wo_maxes = temperature * jnp.log(
jnp.zeros((num_labels,))
.at[labels]
.add(jnp.exp((data - maxes[labels]) / temperature))
)
return logsumexps_wo_maxes + maxes
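# Illustrative sketch, not part of the library: at temperature 1.0,
# logsumexps_with_temp is the ordinary per-label logsumexp. Values are made up.
def _example_logsumexps_with_temp():
  data = jnp.array([0.5, 2.0, -1.0, 3.0])
  labels = jnp.array([0, 0, 1, 1])
  out = logsumexps_with_temp(data, labels, num_labels=2, temperature=1.0)
  # out[0] == log(exp(0.5) + exp(2.0)); out[1] == log(exp(-1.0) + exp(3.0))
  return out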
@functools.partial(jax.jit, static_argnames=("num_labels"))
def softmax_and_logsumexps_with_temp(
data: jnp.array,
labels: jnp.array,
num_labels: int,
temperature: float,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Given a flattened sequence of elements and their corresponding labels, returns for each label the stable (1) softmax and (2) logsumexp at the given temperature.
Args:
data: Array of shape (a_len,) where a_len is an arbitrary integer.
labels: Label array of shape (a_len,), assigning a label to each entry.
Labels must be 0,..., num_labels - 1.
num_labels: Number of different labels.
temperature: Temperature for loopy belief propagation.
Returns:
softmax_data: Stable softmax with per-label normalization at the given
temperature.
logsumexp_data: Stable logsumexp for each label at the given temperature.
"""
maxes = (
jnp.full(shape=(num_labels,), fill_value=NEG_INF).at[labels].max(data)
)
exp_data = temperature * jnp.exp((data - maxes[labels]) / temperature)
sumexp_data = (
jnp.full(shape=(num_labels,), fill_value=0.0).at[labels].add(exp_data)
)
logsumexp_data = maxes + temperature * jnp.log(sumexp_data / temperature)
softmax_data = exp_data / sumexp_data[labels]
return softmax_data, logsumexp_data
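# Illustrative sketch, not part of the library: the softmax returned by
# softmax_and_logsumexps_with_temp sums to one within each label. The values
# and temperature below are made up.
def _example_softmax_and_logsumexps_with_temp():
  data = jnp.array([0.5, 2.0, -1.0, 3.0, 1.0])
  labels = jnp.array([0, 0, 1, 1, 2])
  softmax_data, logsumexp_data = softmax_and_logsumexps_with_temp(
      data, labels, num_labels=3, temperature=0.5
  )
  per_label_sums = jnp.zeros(3).at[labels].add(softmax_data)  # ~[1., 1., 1.]
  return softmax_data, logsumexp_data, per_label_sums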
@jax.jit
def log1mexp(x):
"""Returns a stable implementation of f(x) = log(1 - exp(-x)) for x >= 0.
See https://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf
Args:
x: input
"""
return jnp.where(
x <= jnp.log(2),
jnp.log(-jnp.expm1(-x)),
jnp.log1p(-jnp.exp(-x))
)
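# Illustrative sketch, not part of the library: log1mexp matches the naive
# formula log(1 - exp(-x)) where that formula is accurate, and avoids the
# cancellation it suffers from for small positive x. Inputs are made up.
def _example_log1mexp():
  x = jnp.array([1e-4, 0.5, 5.0])
  stable = log1mexp(x)
  naive = jnp.log(1.0 - jnp.exp(-x))  # loses accuracy for the smallest entry
  return stable, naive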
@jax.jit
def logaddexp_with_temp(
data1: jnp.array,
data2: jnp.array,
temperature: float,
) -> jnp.ndarray:
"""Returns the stable logaddexp of two arrays of same length at a given temperature, ie T * log(exp(data1/T) + exp(data2/T)).
Args:
data1: Array of shape (a_len,) where a_len is an arbitrary integer.
data2: Array of same shape (a_len,)
temperature: Temperature for loopy belief propagation.
"""
maxes = jnp.maximum(data1, data2)
mins = jnp.minimum(data1, data2)
logsumexps_wo_maxes = jnp.log1p(jnp.exp((mins - maxes) / temperature))
return temperature * logsumexps_wo_maxes + maxes
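# Illustrative sketch, not part of the library: at temperature 1.0,
# logaddexp_with_temp reduces to the standard jnp.logaddexp. Values are made
# up.
def _example_logaddexp_with_temp():
  data1 = jnp.array([0.0, 2.0, -3.0])
  data2 = jnp.array([1.0, -1.0, -3.0])
  ours = logaddexp_with_temp(data1, data2, temperature=1.0)
  reference = jnp.logaddexp(data1, data2)  # matches up to float precision
  return ours, reference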
@jax.jit
def logminusexp_with_temp(
data1: jnp.array,
data2: jnp.array,
temperature: float,
eps=1e-30
) -> jnp.ndarray:
"""Returns a stable version of T * log(exp(data1/T) - exp(data2/T)) where data1 >= data2 entry-wise.
Args:
data1: Array of shape (a_len,) where a_len is an arbitrary integer.
data2: Array of same shape (a_len,)
temperature: Temperature for loopy belief propagation.
eps: If the difference between the entries is lower than this threshold,
we set the result to NEG_INF
"""
return jnp.where(
data1 >= data2 + eps,
temperature * log1mexp((data1 - data2) / temperature)
+ data1,
NEG_INF,
)
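# Illustrative sketch, not part of the library: logminusexp_with_temp inverts
# logaddexp_with_temp when data1 is well above data2. Values are made up.
def _example_logminusexp_with_temp():
  data2 = jnp.array([0.0, 1.0])
  total = logaddexp_with_temp(jnp.array([0.5, 2.0]), data2, temperature=1.0)
  recovered = logminusexp_with_temp(total, data2, temperature=1.0)
  # recovered is approximately [0.5, 2.0]
  return recovered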
| PGMax-main | pgmax/factor/update_utils.py |
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module containing the base classes for factors in a factor graph."""
import dataclasses
from typing import Any, Dict, Hashable, List, Sequence, Tuple, Union
import jax
import jax.numpy as jnp
import numba as nb
import numpy as np
@jax.tree_util.register_pytree_node_class
@dataclasses.dataclass(frozen=True, eq=False)
class Wiring:
"""Wiring for factors.
Attributes:
var_states_for_edges: Array of shape (num_edge_states, 3)
For each edge state:
var_states_for_edges[ii, 0] contains its global variable state index
var_states_for_edges[ii, 1] contains its global edge index
var_states_for_edges[ii, 2] contains its global factor index
"""
var_states_for_edges: Union[np.ndarray, jnp.ndarray]
def __post_init__(self):
for field in self.__dataclass_fields__:
if isinstance(getattr(self, field), np.ndarray):
getattr(self, field).flags.writeable = False
def get_inference_arguments(self):
"""Return the list of arguments to run BP for this Wiring."""
raise NotImplementedError(
"Please subclass the Wiring class and override this method."
)
def tree_flatten(self):
return jax.tree_util.tree_flatten(dataclasses.asdict(self))
@classmethod
def tree_unflatten(cls, aux_data, children):
return cls(**aux_data.unflatten(children))
@dataclasses.dataclass(frozen=True, eq=False)
class Factor:
"""A factor.
Attributes:
variables: List of variables connected by the Factor. Each variable is
represented by a tuple (variable hash, variable num_states)
log_potentials: Array of log potentials
Raises:
NotImplementedError: If compile_wiring is not implemented
"""
variables: List[Tuple[int, int]]
log_potentials: np.ndarray
def __post_init__(self):
if not hasattr(self, "compile_wiring"):
raise NotImplementedError(
"Please implement compile_wiring in for your factor"
)
@staticmethod
def concatenate_wirings(wirings: Sequence[Wiring]) -> Wiring:
"""Concatenate a list of Wirings.
Args:
wirings: A list of Wirings
Returns:
Concatenated Wiring
"""
raise NotImplementedError(
"Please subclass the Wiring class and override this method."
)
@staticmethod
def compute_energy(
wiring: Wiring,
edge_states_one_hot_decoding: jnp.ndarray,
log_potentials: jnp.ndarray,
) -> float:
"""Returns the contribution to the energy of several Factors of same type.
Args:
wiring: Wiring
edge_states_one_hot_decoding: Array of shape (num_edge_states,)
Flattened array of one-hot decoding of the edge states connected to the
Factors
log_potentials: Array of log potentials
"""
raise NotImplementedError(
"Please subclass the Factor class and override this method."
)
@staticmethod
def compute_factor_energy(
variables: List[Hashable],
vars_to_map_states: Dict[Hashable, Any],
) -> Tuple[float, Dict[Hashable, Any]]:
"""Returns the contribution to the energy of a single Factor.
Args:
variables: List of variables connected by the Factor
vars_to_map_states: A dictionary mapping each individual variable to
its MAP state.
"""
raise NotImplementedError(
"Please subclass the Factor class and override this method"
)
def concatenate_var_states_for_edges(
list_var_states_for_edges: Sequence[np.ndarray],
) -> np.ndarray:
"""Concatenate a list of var_states_for_edges.
Args:
list_var_states_for_edges: A list of var_states_for_edges
Returns:
Concatenated var_states_for_edges
Raises: ValueError if
(1) list_var_states_for_edges is None
    (2) one of the list_var_states_for_edges entries is None
"""
if list_var_states_for_edges is None:
raise ValueError("list_var_states_for_edges cannot be None")
# Remove empty
list_var_states_for_edges_wo_empty = []
for var_states_for_edges in list_var_states_for_edges:
# var_states_for_edges can be empty but cannot be None
if var_states_for_edges is None:
raise ValueError("var_states_for_edges cannot be None")
if var_states_for_edges.shape[0] > 0:
list_var_states_for_edges_wo_empty.append(var_states_for_edges)
num_edges_cumsum = np.insert(
np.cumsum(
[
var_states_for_edges[-1, 1] + 1
for var_states_for_edges in list_var_states_for_edges_wo_empty
]
),
0,
0,
)[:-1]
num_factors_cumsum = np.insert(
np.cumsum(
[
var_states_for_edges[-1, 2] + 1
for var_states_for_edges in list_var_states_for_edges_wo_empty
]
),
0,
0,
)[:-1]
var_states_for_edges_w_offsets = []
for idx, var_states_for_edges in enumerate(
list_var_states_for_edges_wo_empty
):
var_states_for_edges_w_offsets.append(
var_states_for_edges
+ np.array(
[[0, num_edges_cumsum[idx], num_factors_cumsum[idx]]],
dtype=int,
)
)
return np.concatenate(var_states_for_edges_w_offsets, axis=0)
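# Illustrative sketch, not part of the library: concatenating the
# var_states_for_edges of two wirings. The edge and factor indices of the
# second block are shifted by the first block's counts, while the (already
# global) variable state indices are left untouched. All numbers are made up.
def _example_concatenate_var_states_for_edges():
  first = np.array([[0, 0, 0], [1, 0, 0], [2, 1, 0], [3, 1, 0]])
  second = np.array([[4, 0, 0], [5, 0, 0]])
  concatenated = concatenate_var_states_for_edges([first, second])
  # The second block becomes [[4, 2, 1], [5, 2, 1]]: edge indices offset by
  # first[-1, 1] + 1 == 2 and factor indices by first[-1, 2] + 1 == 1.
  return concatenated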
# pylint: disable=g-doc-args
# pylint: disable=g-doc-return-or-yield
@nb.jit(parallel=False, cache=True, fastmath=True, nopython=True)
def compile_var_states_for_edges_numba(
var_states_for_edges: np.ndarray,
num_edges_states_cumsum: np.ndarray,
first_var_state_by_edges: np.ndarray,
factor_indices: np.ndarray,
):
"""Fast numba computation of the var_states_for_edges of a Wiring.
var_states_for_edges is updated in-place
"""
for edge_idx in nb.prange(num_edges_states_cumsum.shape[0] - 1):
start_edge_state_idx, end_edge_state_idx = (
num_edges_states_cumsum[edge_idx],
num_edges_states_cumsum[edge_idx + 1],
)
# Variable states for each edge state
var_states_for_edges[
start_edge_state_idx:end_edge_state_idx, 0
] = first_var_state_by_edges[edge_idx] + np.arange(
end_edge_state_idx - start_edge_state_idx
)
# Edge index for each edge state
var_states_for_edges[start_edge_state_idx:end_edge_state_idx, 1] = edge_idx
# Factor index for each edge state
var_states_for_edges[
start_edge_state_idx:end_edge_state_idx, 2
] = factor_indices[edge_idx]
return var_states_for_edges
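# Illustrative sketch, not part of the library: filling var_states_for_edges
# for two edges with 2 and 3 states. The global variable state start indices
# (0 and 10) and the factor indices are made up.
def _example_compile_var_states_for_edges():
  num_edges_states_cumsum = np.array([0, 2, 5])
  first_var_state_by_edges = np.array([0, 10])
  factor_indices = np.array([0, 1])
  var_states_for_edges = np.empty((5, 3), dtype=int)
  compile_var_states_for_edges_numba(
      var_states_for_edges,
      num_edges_states_cumsum,
      first_var_state_by_edges,
      factor_indices,
  )
  # var_states_for_edges ==
  # [[0, 0, 0], [1, 0, 0], [10, 1, 1], [11, 1, 1], [12, 1, 1]]
  return var_states_for_edges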
| PGMax-main | pgmax/factor/factor.py |
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines an enumeration factor."""
import dataclasses
import functools
from typing import Any, Dict, Hashable, List, Mapping, Sequence, Tuple, Union
import warnings
import jax
import jax.numpy as jnp
import numba as nb
import numpy as np
from pgmax.factor import factor
from pgmax.factor import update_utils
from pgmax.utils import NEG_INF
# pylint: disable=unexpected-keyword-arg
@jax.tree_util.register_pytree_node_class
@dataclasses.dataclass(frozen=True, eq=False)
class EnumWiring(factor.Wiring):
"""Wiring for EnumFactors.
Attributes:
factor_configs_edge_states: Array of shape (num_factor_configs, 2)
factor_configs_edge_states[ii] contains a pair of global enumeration
factor_config and global edge_state indices
factor_configs_edge_states[ii, 0] contains the global EnumFactor
config index
factor_configs_edge_states[ii, 1] contains the corresponding global
edge_state index
Both indices only take into account the EnumFactors of the FactorGraph
num_val_configs: Number of valid configurations for this wiring
num_factors: Number of factors covered by this wiring
"""
factor_configs_edge_states: Union[np.ndarray, jnp.ndarray]
def __post_init__(self):
super().__post_init__()
if self.factor_configs_edge_states.shape[0] == 0:
num_val_configs = 0
else:
num_val_configs = int(self.factor_configs_edge_states[-1, 0]) + 1
object.__setattr__(self, "num_val_configs", num_val_configs)
if self.var_states_for_edges.shape[0] == 0:
num_factors = 0
else:
num_factors = int(self.var_states_for_edges[-1, 2]) + 1
object.__setattr__(self, "num_factors", num_factors)
def get_inference_arguments(self) -> Dict[str, Any]:
"""Return the list of arguments to run BP with EnumWirings."""
assert hasattr(self, "num_val_configs")
return {
"factor_configs_indices": self.factor_configs_edge_states[..., 0],
"factor_configs_edge_states": self.factor_configs_edge_states[..., 1],
"num_val_configs": self.num_val_configs,
"num_factors": self.num_factors
}
@dataclasses.dataclass(frozen=True, eq=False)
class EnumFactor(factor.Factor):
"""An enumeration factor.
Attributes:
factor_configs: Array of shape (num_val_configs, num_variables)
An array containing an explicit enumeration of all valid configurations
log_potentials: Array of shape (num_val_configs,)
An array containing the log of the potential value for each valid
configuration
Raises:
ValueError: If:
(1) The dtype of the factor_configs array is not int
(2) The dtype of the potential array is not float
(3) factor_configs does not have the correct shape
(4) The potential array does not have the correct shape
(5) The factor_configs array contains invalid values
"""
factor_configs: np.ndarray
log_potentials: np.ndarray
def __post_init__(self):
self.factor_configs.flags.writeable = False
if not np.issubdtype(self.factor_configs.dtype, np.integer):
raise ValueError(
f"Configurations should be integers. Got {self.factor_configs.dtype}."
)
if not np.issubdtype(self.log_potentials.dtype, np.floating):
raise ValueError(
f"Potential should be floats. Got {self.log_potentials.dtype}."
)
if self.factor_configs.ndim != 2:
raise ValueError(
"factor_configs should be a 2D array containing a list of valid"
" configurations for EnumFactor. Got a factor_configs array of shape"
f" {self.factor_configs.shape}."
)
if len(self.variables) != self.factor_configs.shape[1]:
raise ValueError(
f"Number of variables {len(self.variables)} doesn't match given"
f" configurations {self.factor_configs.shape}"
)
if self.log_potentials.shape != (self.factor_configs.shape[0],):
raise ValueError(
"Expected log potentials of shape"
f" {(self.factor_configs.shape[0],)} for"
f" ({self.factor_configs.shape[0]}) valid configurations. Got log"
f" potentials of shape {self.log_potentials.shape}."
)
vars_num_states = np.array([variable[1] for variable in self.variables])
if not np.logical_and(
self.factor_configs >= 0, self.factor_configs < vars_num_states[None]
).all():
raise ValueError("Invalid configurations for given variables")
@staticmethod
def concatenate_wirings(wirings: Sequence[EnumWiring]) -> EnumWiring:
"""Concatenate a list of EnumWirings.
Args:
wirings: A list of EnumWirings
Returns:
Concatenated EnumWiring
"""
if not wirings:
return EnumWiring(
var_states_for_edges=np.empty((0, 3), dtype=int),
factor_configs_edge_states=np.empty((0, 2), dtype=int),
)
concatenated_var_states_for_edges = factor.concatenate_var_states_for_edges(
[wiring.var_states_for_edges for wiring in wirings]
)
factor_configs_cumsum = np.insert(
np.cumsum(
[wiring.factor_configs_edge_states[-1, 0] + 1 for wiring in wirings]
),
0,
0,
)[:-1]
    # Note: this corresponds to all the factor_to_msgs_starts of the EnumFactors
num_edge_states_cumsum = np.insert(
np.cumsum(
[wiring.var_states_for_edges.shape[0] for wiring in wirings]
),
0,
0,
)[:-1]
factor_configs_edge_states = []
for ww, wiring in enumerate(wirings):
factor_configs_edge_states.append(
wiring.factor_configs_edge_states
+ np.array(
[[factor_configs_cumsum[ww], num_edge_states_cumsum[ww]]],
dtype=int,
)
)
return EnumWiring(
var_states_for_edges=concatenated_var_states_for_edges,
factor_configs_edge_states=np.concatenate(
factor_configs_edge_states, axis=0
),
)
@staticmethod
def compile_wiring(
factor_edges_num_states: np.ndarray,
variables_for_factors: Sequence[List[Tuple[int, int]]],
factor_configs: np.ndarray,
vars_to_starts: Mapping[Tuple[int, int], int],
num_factors: int,
) -> EnumWiring:
"""Compile an EnumWiring for an EnumFactor or a FactorGroup with EnumFactors.
Internally calls _compile_var_states_numba and
_compile_enumeration_wiring_numba for speed.
Args:
factor_edges_num_states: An array concatenating the number of states
for the variables connected to each Factor of the FactorGroup.
Each variable will appear once for each Factor it connects to.
variables_for_factors: A list of list of variables. Each list within
the outer list contains the variables connected to a Factor. The
same variable can be connected to multiple Factors.
factor_configs: Array of shape (num_val_configs, num_variables)
containing an explicit enumeration of all valid configurations.
vars_to_starts: A dictionary that maps variables to their global
starting indices For an n-state variable, a global start index of
m means the global indices of its n variable states are m, m + 1,
..., m + n - 1
num_factors: Number of Factors in the FactorGroup.
Raises: ValueError if factor_edges_num_states is not of shape
(num_factors * num_variables, )
Returns:
The EnumWiring
"""
# Step 1: compute var_states_for_edges
first_var_state_by_edges = []
factor_indices = []
for factor_idx, variables_for_factor in enumerate(variables_for_factors):
for variable in variables_for_factor:
first_var_state_by_edges.append(vars_to_starts[variable])
factor_indices.append(factor_idx)
first_var_state_by_edges = np.array(first_var_state_by_edges)
factor_indices = np.array(factor_indices)
num_edges_states_cumsum = np.insert(
np.cumsum(factor_edges_num_states), 0, 0
)
var_states_for_edges = np.empty(
shape=(num_edges_states_cumsum[-1], 3), dtype=int
)
factor.compile_var_states_for_edges_numba(
var_states_for_edges,
num_edges_states_cumsum,
first_var_state_by_edges,
factor_indices,
)
# Step 2: compute factor_configs_edge_states
num_configs, num_variables = factor_configs.shape
if factor_edges_num_states.shape != (num_factors * num_variables,):
raise ValueError(
"Expected factor_edges_num_states shape is"
f" {(num_factors * num_variables,)}. Got"
f" {factor_edges_num_states.shape}."
)
factor_configs_edge_states = np.empty(
(num_factors * num_configs * num_variables, 2), dtype=int
)
factor_edges_starts = np.insert(np.cumsum(factor_edges_num_states), 0, 0)
_compile_enumeration_wiring_numba(
factor_configs_edge_states,
factor_configs,
factor_edges_starts,
num_factors,
)
return EnumWiring(
var_states_for_edges=var_states_for_edges,
factor_configs_edge_states=factor_configs_edge_states,
)
@staticmethod
@functools.partial(
jax.jit, static_argnames=("num_val_configs", "num_factors")
)
def compute_energy(
edge_states_one_hot_decoding: jnp.ndarray,
log_potentials: jnp.ndarray,
factor_configs_indices: jnp.ndarray,
factor_configs_edge_states: jnp.ndarray,
num_val_configs: int,
num_factors: int
) -> float:
"""Returns the contribution to the energy of several EnumFactors.
Args:
edge_states_one_hot_decoding: Array of shape (num_edge_states,)
Flattened array of one-hot decoding of the edge states connected to the
EnumFactors
log_potentials: Array of shape (num_val_configs, ). An entry at index i
is the log potential function value for the configuration with global
EnumFactor config index i.
factor_configs_indices: Array of shape (num_factor_configs,) containing
the global EnumFactor config indices.
Only takes into account the EnumFactors of the FactorGraph
factor_configs_edge_states: Array of shape (num_factor_configs,)
        containing the global edge_state index associated with each EnumFactor
config index.
Only takes into account the EnumFactors of the FactorGraph
num_val_configs: the total number of valid configurations for all the
EnumFactors in the factor graph.
num_factors: the total number of EnumFactors in the factor graph.
"""
# One-hot decoding of all the factors configs
fac_config_decoded = (
jnp.ones(shape=(num_val_configs,), dtype=bool)
.at[factor_configs_indices]
.multiply(edge_states_one_hot_decoding[factor_configs_edge_states])
)
# Replace infinite log potentials
clipped_nan_log_potentials = jnp.where(
jnp.logical_and(jnp.isinf(log_potentials), fac_config_decoded != 1),
-NEG_INF * jnp.sign(log_potentials),
log_potentials,
)
energy = jnp.where(
jnp.sum(fac_config_decoded) != num_factors,
jnp.inf, # invalid decoding
-jnp.sum(clipped_nan_log_potentials, where=fac_config_decoded),
)
return energy
@staticmethod
def compute_factor_energy(
variables: List[Hashable],
vars_to_map_states: Dict[Hashable, Any],
factor_configs: jnp.ndarray,
log_potentials: jnp.ndarray,
) -> float:
"""Returns the contribution to the energy of a single EnumFactor.
Args:
variables: List of variables connected by the EnumFactor
vars_to_map_states: A dictionary mapping each individual variable to
its MAP state.
factor_configs: Array of shape (num_val_configs, num_variables)
An array containing an explicit enumeration of all valid configurations
log_potentials: Array of shape (num_val_configs,)
An array containing the log of the potential value for each valid
configuration
"""
vars_states_to_configs_indices = {}
for factor_config_idx, vars_states in enumerate(factor_configs):
vars_states_to_configs_indices[tuple(vars_states)] = factor_config_idx
vars_decoded_states = tuple([vars_to_map_states[var] for var in variables])
if vars_decoded_states not in vars_states_to_configs_indices:
warnings.warn(
f"Invalid decoding for Enum factor {variables} "
f"with variables set to {vars_decoded_states}!"
)
factor_energy = np.inf
else:
factor_config_idx = vars_states_to_configs_indices[
vars_decoded_states
]
factor_energy = -log_potentials[factor_config_idx]
return float(factor_energy)
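# Illustrative sketch, not part of the library: a tiny EnumFactor enforcing
# equality of two binary variables. The (hash, num_states) tuples use made-up
# hashes 0 and 1; in practice they come from a VarGroup.
def _example_enum_factor():
  equality_factor = EnumFactor(
      variables=[(0, 2), (1, 2)],
      factor_configs=np.array([[0, 0], [1, 1]]),
      log_potentials=np.zeros(2),
  )
  energy = EnumFactor.compute_factor_energy(
      variables=[0, 1],
      vars_to_map_states={0: 1, 1: 1},
      factor_configs=equality_factor.factor_configs,
      log_potentials=equality_factor.log_potentials,
  )
  # The decoding (1, 1) is the second valid config, so energy == 0.0
  return equality_factor, energy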
# pylint: disable=g-doc-args
@nb.jit(parallel=False, cache=True, fastmath=True, nopython=True)
def _compile_enumeration_wiring_numba(
factor_configs_edge_states: np.ndarray,
factor_configs: np.ndarray,
factor_edges_starts: np.ndarray,
num_factors: int,
):
"""Fast numba computation of the factor_configs_edge_states of an EnumWiring.
  factor_configs_edge_states is updated in-place.
"""
num_configs, num_variables = factor_configs.shape
for factor_idx in nb.prange(num_factors):
for config_idx in range(num_configs):
factor_config_idx = num_configs * factor_idx + config_idx
factor_configs_edge_states[
num_variables
* factor_config_idx : num_variables
* (factor_config_idx + 1),
0,
] = factor_config_idx
for var_idx in range(num_variables):
factor_configs_edge_states[
num_variables * factor_config_idx + var_idx, 1
] = (
factor_edges_starts[num_variables * factor_idx + var_idx]
+ factor_configs[config_idx, var_idx]
)
# pylint: disable=unused-argument
@functools.partial(
jax.jit, static_argnames=("num_val_configs", "num_factors", "temperature")
)
def pass_enum_fac_to_var_messages(
vtof_msgs: jnp.ndarray,
factor_configs_indices: jnp.ndarray,
factor_configs_edge_states: jnp.ndarray,
log_potentials: jnp.ndarray,
num_val_configs: int,
num_factors: int,
temperature: float,
normalize: bool
) -> jnp.ndarray:
"""Passes messages from EnumFactors to Variables.
The update is performed in two steps.
(1) First, a "summary" array is generated that has an entry for every valid
configuration for every EnumFactor. The elements of this array are simply
the sums of messages across each valid config.
(2) Then, the info from factor_configs_edge_states is used to apply the
scattering operation and generate a flat set of output messages.
Args:
vtof_msgs: Array of shape (num_edge_states,)
      This holds all the flattened variable to EnumFactors messages
factor_configs_indices: Array of shape (num_factor_configs,) containing the
global EnumFactor config indices.
Only takes into account the EnumFactors of the FactorGraph
factor_configs_edge_states: Array of shape (num_factor_configs,) containing
the global edge_state index associated with each EnumFactor config index.
Only takes into account the EnumFactors of the FactorGraph
log_potentials: Array of shape (num_val_configs, ). An entry at index i
is the log potential function value for the configuration with global
EnumFactor config index i.
num_val_configs: the total number of valid configurations for all the
EnumFactors in the factor graph.
num_factors: total number of EnumFactors in the factor graph.
temperature: Temperature for loopy belief propagation. 1.0 corresponds
to sum-product, 0.0 corresponds to max-product.
normalize: Whether we normalize the outgoing messages. Not used for
EnumFactors.
Returns:
Array of shape (num_edge_states,). This holds all the flattened
EnumFactors to variable messages.
"""
fac_config_summary_sum = (
jnp.zeros(shape=(num_val_configs,))
.at[factor_configs_indices]
.add(vtof_msgs[factor_configs_edge_states])
) + log_potentials
max_factor_config_summary_for_edge_states = (
jnp.full(shape=(vtof_msgs.shape[0],), fill_value=-jnp.inf)
.at[factor_configs_edge_states]
.max(fac_config_summary_sum[factor_configs_indices])
)
if temperature == 0.0:
ftov_msgs = max_factor_config_summary_for_edge_states
else:
ftov_msgs = update_utils.logsumexps_with_temp(
data=fac_config_summary_sum[factor_configs_indices],
labels=factor_configs_edge_states,
num_labels=vtof_msgs.shape[0],
temperature=temperature,
maxes=max_factor_config_summary_for_edge_states
)
# Remove incoming messages
ftov_msgs -= vtof_msgs
return ftov_msgs
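# Illustrative sketch, not part of the library: max-product messages for one
# EnumFactor enforcing equality of two binary variables. The wiring arrays are
# written out by hand for this single factor; in the library they come from
# EnumFactor.compile_wiring.
def _example_pass_enum_fac_to_var_messages():
  # Edge states 0-1 belong to the first variable, edge states 2-3 to the second
  vtof_msgs = jnp.array([0.0, 1.0, 2.0, -1.0])
  ftov_msgs = pass_enum_fac_to_var_messages(
      vtof_msgs=vtof_msgs,
      factor_configs_indices=jnp.array([0, 0, 1, 1]),
      factor_configs_edge_states=jnp.array([0, 2, 1, 3]),
      log_potentials=jnp.zeros(2),
      num_val_configs=2,
      num_factors=1,
      temperature=0.0,
      normalize=True,
  )
  # ftov_msgs == [2.0, -1.0, 0.0, 1.0]: the factor forwards each variable's
  # incoming messages to the matching states of the other variable
  return ftov_msgs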
| PGMax-main | pgmax/factor/enum.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a pool factor."""
import dataclasses
import functools
from typing import Any, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union
import warnings
import jax
import jax.numpy as jnp
import numpy as np
from pgmax.factor import factor
from pgmax.factor import logical
from pgmax.factor import update_utils
from pgmax.utils import NEG_INF
# pylint: disable=unexpected-keyword-arg
@jax.tree_util.register_pytree_node_class
@dataclasses.dataclass(frozen=True, eq=False)
class PoolWiring(factor.Wiring):
"""Wiring for PoolFactors.
Attributes:
pool_choices_edge_states: Array of shape (num_pool_choices, 2)
pool_choices_edge_states[ii, 0] contains the global PoolFactor index
pool_choices_edge_states[ii, 1] contains the message index of the pool
choice variable's state 0.
The message index of the pool choice variable's state 1 is
pool_choices_edge_states[ii, 1] + 1
Both indices only take into account the PoolFactors of the FactorGraph
pool_indicators_edge_states: Array of shape (num_pool_factors,)
pool_indicators_edge_states[ii] contains the message index of the pool
indicator variable's state 0
The message index of the pool indicator variable's state 1 is
pool_indicators_edge_states[ii] + 1
Only takes into account the PoolFactors of the FactorGraph
Raises:
ValueError: If:
      (1) There are not num_pool_factors different factor indices
(2) There is a factor index higher than num_pool_factors - 1
"""
pool_choices_edge_states: Union[np.ndarray, jnp.ndarray]
pool_indicators_edge_states: Union[np.ndarray, jnp.ndarray]
def __post_init__(self):
super().__post_init__()
if self.pool_choices_edge_states.shape[0] > 0:
pool_factor_indices = self.pool_choices_edge_states[:, 0]
num_pool_factors = self.pool_indicators_edge_states.shape[0]
if np.unique(pool_factor_indices).shape[0] != num_pool_factors:
raise ValueError(
f"The PoolWiring must have {num_pool_factors} different"
" PoolFactor indices"
)
if pool_factor_indices.max() >= num_pool_factors:
raise ValueError(
f"The highest PoolFactor index must be {num_pool_factors - 1}"
)
def get_inference_arguments(self) -> Dict[str, Any]:
"""Return the list of arguments to run BP with LogicalWirings."""
return {
"pool_choices_factor_indices": self.pool_choices_edge_states[..., 0],
"pool_choices_msg_indices": self.pool_choices_edge_states[..., 1],
"pool_indicators_edge_states": self.pool_indicators_edge_states,
}
@dataclasses.dataclass(frozen=True, eq=False)
class PoolFactor(factor.Factor):
"""A Pool factor of the form (pc1, ...,pcn, pi) where (pc1,...,pcn) are the pool choices and pi is the pool indicator.
A Pool factor is defined as:
F(pc1, ...,pcn, pi) = 0 <=> (pc1=...=pcn=pi=0) OR (pi=1 AND pc1 +...+ pcn=1)
F(pc1, ...,pcn, pi) = -inf o.w.
i.e. either (a) all the variables are set to 0, or (b) the pool indicator
variable is set to 1 and exactly one of the pool choices variables is set to 1
Note: placing the pool indicator at the end allows us to reuse our
  existing infrastructure for wiring logical factors
"""
log_potentials: np.ndarray = dataclasses.field(
init=False,
default_factory=lambda: np.empty((0,)),
)
def __post_init__(self):
if len(self.variables) < 2:
raise ValueError(
"A PoolFactor requires at least one pool choice and one pool "
"indicator."
)
if not np.all([variable[1] == 2 for variable in self.variables]):
raise ValueError("All the variables in a PoolFactor should all be binary")
@staticmethod
def concatenate_wirings(wirings: Sequence[PoolWiring]) -> PoolWiring:
"""Concatenate a list of PoolWirings.
Args:
wirings: A list of PoolWirings
Returns:
Concatenated PoolWiring
"""
if not wirings:
return PoolWiring(
var_states_for_edges=np.empty((0, 3), dtype=int),
pool_choices_edge_states=np.empty((0, 2), dtype=int),
pool_indicators_edge_states=np.empty((0,), dtype=int),
)
logical_wirings = []
for wiring in wirings:
logical_wiring = logical.LogicalWiring(
var_states_for_edges=wiring.var_states_for_edges,
parents_edge_states=wiring.pool_choices_edge_states,
children_edge_states=wiring.pool_indicators_edge_states,
edge_states_offset=1,
)
logical_wirings.append(logical_wiring)
logical_wiring = logical.LogicalFactor.concatenate_wirings(logical_wirings)
return PoolWiring(
var_states_for_edges=logical_wiring.var_states_for_edges,
pool_choices_edge_states=logical_wiring.parents_edge_states,
pool_indicators_edge_states=logical_wiring.children_edge_states,
)
# pylint: disable=g-doc-args
@staticmethod
def compile_wiring(
factor_edges_num_states: np.ndarray,
variables_for_factors: Sequence[List[Tuple[int, int]]],
factor_sizes: np.ndarray,
vars_to_starts: Mapping[Tuple[int, int], int],
) -> PoolWiring:
"""Compile a PoolWiring for a PoolFactor or for a FactorGroup with PoolFactors.
Internally uses the logical factor compile_wiring.
Args: See LogicalFactor.compile_wiring docstring.
Returns:
The PoolWiring
"""
logical_wiring = logical.LogicalFactor.compile_wiring(
factor_edges_num_states=factor_edges_num_states,
variables_for_factors=variables_for_factors,
factor_sizes=factor_sizes,
vars_to_starts=vars_to_starts,
edge_states_offset=1,
)
return PoolWiring(
var_states_for_edges=logical_wiring.var_states_for_edges,
pool_choices_edge_states=logical_wiring.parents_edge_states,
pool_indicators_edge_states=logical_wiring.children_edge_states,
)
@staticmethod
@jax.jit
def compute_energy(
edge_states_one_hot_decoding: jnp.ndarray,
pool_choices_factor_indices: jnp.ndarray,
pool_choices_msg_indices: jnp.ndarray,
pool_indicators_edge_states: jnp.ndarray,
log_potentials: Optional[jnp.ndarray] = None,
) -> float:
"""Returns the contribution to the energy of several PoolFactors.
Args:
edge_states_one_hot_decoding: Array of shape (num_edge_states,)
Flattened array of one-hot decoding of the edge states connected to the
PoolFactors
pool_choices_factor_indices: Array of shape (num_pool_choices,)
pool_choices_factor_indices[ii] contains the global PoolFactor index of
the pool choice variable's state 0
Only takes into account the PoolFactors of the FactorGraph
pool_choices_msg_indices: Array of shape (num_pool_choices,)
pool_choices_msg_indices[ii] contains the message index of the pool
choice variable's state 0
The message index of the pool choice variable's state 1 is
pool_choices_msg_indices[ii] + 1
Only takes into account the PoolFactors of the FactorGraph
pool_indicators_edge_states: Array of shape (num_pool_factors,)
pool_indicators_edge_states[ii] contains the message index of the pool
indicator variable's state 0
The message index of the pool indicator variable's state 1 is
pool_indicators_edge_states[ii] + 1
Only takes into account the PoolFactors of the FactorGraph
log_potentials: Optional array of log potentials
"""
num_factors = pool_indicators_edge_states.shape[0]
    # pool_choices_msg_indices + 1 contains the state 1 of each pool choice
# Either all the pool_choices and the pool_indicator are set to 0
# or exactly one of the pool_choices and the pool_indicator are set to 1
pool_choices_decoded = (
jnp.zeros(shape=(num_factors,))
.at[pool_choices_factor_indices]
.add(
edge_states_one_hot_decoding[pool_choices_msg_indices + 1]
)
)
pool_indicators_decoded = edge_states_one_hot_decoding[
pool_indicators_edge_states + 1
]
energy = jnp.where(
jnp.any(pool_choices_decoded != pool_indicators_decoded),
jnp.inf, # invalid decoding
0.0
)
return energy
@staticmethod
def compute_factor_energy(
variables: List[Hashable],
vars_to_map_states: Dict[Hashable, Any],
**kwargs,
) -> float:
"""Returns the contribution to the energy of a single PoolFactor.
Args:
variables: List of variables connected by the PoolFactor
vars_to_map_states: A dictionary mapping each individual variable to
its MAP state.
**kwargs: Other parameters, not used
"""
vars_decoded_states = np.array(
[vars_to_map_states[var] for var in variables]
)
pool_choices_decoded_states = vars_decoded_states[:-1]
pool_indicators_decoded_states = vars_decoded_states[-1]
if (
np.sum(pool_choices_decoded_states)
!= pool_indicators_decoded_states
):
warnings.warn(
f"Invalid decoding for Pool factor {variables} "
f"with pool choices set to {pool_choices_decoded_states} "
f"and pool indicators set to {pool_indicators_decoded_states}!"
)
factor_energy = np.inf
else:
factor_energy = 0.0
return factor_energy
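# Illustrative sketch, not part of the library: valid and invalid MAP decodings
# for a PoolFactor with two pool choices and one pool indicator. Variable names
# are made up; any hashable can name a variable.
def _example_pool_factor_energy():
  variables = ["choice_0", "choice_1", "indicator"]
  all_off = {"choice_0": 0, "choice_1": 0, "indicator": 0}
  one_on = {"choice_0": 1, "choice_1": 0, "indicator": 1}
  two_on = {"choice_0": 1, "choice_1": 1, "indicator": 1}
  assert PoolFactor.compute_factor_energy(variables, all_off) == 0.0
  assert PoolFactor.compute_factor_energy(variables, one_on) == 0.0
  assert PoolFactor.compute_factor_energy(variables, two_on) == np.inf
  return variables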
# pylint: disable=unused-argument
@functools.partial(jax.jit, static_argnames=("temperature", "normalize"))
def pass_pool_fac_to_var_messages(
vtof_msgs: jnp.ndarray,
pool_choices_factor_indices: jnp.ndarray,
pool_choices_msg_indices: jnp.ndarray,
pool_indicators_edge_states: jnp.ndarray,
temperature: float,
normalize: bool,
log_potentials: Optional[jnp.ndarray] = None,
) -> jnp.ndarray:
"""Passes messages from PoolFactors to Variables.
Args:
vtof_msgs: Array of shape (num_edge_states,). This holds all the flattened
      variable to PoolFactors messages.
pool_choices_factor_indices: Array of shape (num_pool_choices,)
pool_choices_factor_indices[ii] contains the global PoolFactor index of
the pool choice variable's state 0
Only takes into account the PoolFactors of the FactorGraph
pool_choices_msg_indices: Array of shape (num_pool_choices,)
pool_choices_msg_indices[ii] contains the message index of the pool choice
variable's state 0
The message index of the pool choice variable's state 1 is
pool_choices_msg_indices[ii] + 1
Only takes into account the PoolFactors of the FactorGraph
pool_indicators_edge_states: Array of shape (num_pool_factors,)
pool_indicators_edge_states[ii] contains the message index of the pool
indicator variable's state 0
The message index of the pool indicator variable's state 1 is
pool_indicators_edge_states[ii] + 1
Only takes into account the PoolFactors of the FactorGraph
temperature: Temperature for loopy belief propagation. 1.0 corresponds to
sum-product, 0.0 corresponds to max-product.
normalize: Whether we normalize the outgoing messages. Set to True for BP
and to False for Smooth Dual LP.
log_potentials: Optional array of log potentials
Returns:
Array of shape (num_edge_states,). This holds all the flattened PoolFactors
to variable messages.
Note: The updates below are derived to be mathematically stable at low
temperatures in (0, 0.1].
logaddexp_with_temp, logminusexp_with_temp and logsumexps_with_temp are also
derived to be numerically stable at these low temperatures.
"""
num_factors = pool_indicators_edge_states.shape[0]
pool_choices_tof_msgs_diffs = (
vtof_msgs[pool_choices_msg_indices + 1]
- vtof_msgs[pool_choices_msg_indices]
)
pool_choices_tof_msgs_zeros = vtof_msgs[pool_choices_msg_indices]
pool_indicators_tof_msgs_diffs = (
vtof_msgs[pool_indicators_edge_states + 1]
- vtof_msgs[pool_indicators_edge_states]
)
pool_indicators_tof_msgs_ones = vtof_msgs[pool_indicators_edge_states + 1]
# First, get the easier outgoing messages to pool choices and pool indicators
sums_pool_choices_tof_msgs_zeros = (
jnp.zeros((num_factors,))
.at[pool_choices_factor_indices]
.add(pool_choices_tof_msgs_zeros)
)
pool_choices_msgs_ones = (
sums_pool_choices_tof_msgs_zeros[pool_choices_factor_indices]
+ pool_indicators_tof_msgs_ones[pool_choices_factor_indices]
- pool_choices_tof_msgs_zeros
)
pool_indicators_msgs_zeros = sums_pool_choices_tof_msgs_zeros
# Second derive the other outgoing messages
# Get the maxes and argmaxes of pool_choices_tof_msgs_diffs per factor
(
pool_choices_diffs_maxes,
pool_choices_diffs_argmaxes,
) = update_utils.get_maxes_and_argmaxes(
pool_choices_tof_msgs_diffs, pool_choices_factor_indices, num_factors
)
# Consider the max-product case separately.
if temperature == 0.0:
# Get the second maxes and argmaxes per factor
pool_choices_diffs_wo_maxes = pool_choices_tof_msgs_diffs.at[
pool_choices_diffs_argmaxes
].set(NEG_INF)
pool_choices_diffs_second_maxes = (
jnp.full(shape=(num_factors,), fill_value=NEG_INF)
.at[pool_choices_factor_indices]
.max(pool_choices_diffs_wo_maxes)
)
# Get the difference between the outgoing messages
pool_choices_msgs_diffs = jnp.minimum(
pool_indicators_tof_msgs_diffs, -pool_choices_diffs_maxes
)[pool_choices_factor_indices]
pool_choices_msgs_diffs = pool_choices_msgs_diffs.at[
pool_choices_diffs_argmaxes
].set(
jnp.minimum(
pool_indicators_tof_msgs_diffs, -pool_choices_diffs_second_maxes,
)
)
pool_indicators_msgs_diffs = pool_choices_diffs_maxes
else:
# Stable difference between the pool indicators outgoing messages
pool_indicators_msgs_diffs = update_utils.logsumexps_with_temp(
data=pool_choices_tof_msgs_diffs,
labels=pool_choices_factor_indices,
num_labels=num_factors,
temperature=temperature,
maxes=pool_choices_diffs_maxes
)
factor_logsumexp_msgs_diffs = update_utils.logaddexp_with_temp(
pool_indicators_msgs_diffs,
-pool_indicators_tof_msgs_diffs,
temperature
)
# Stable difference between the pool choices outgoing messages
# Except for the pool_choices_tof_msgs_diffs argmaxes
pool_choices_msgs_diffs = - update_utils.logminusexp_with_temp(
factor_logsumexp_msgs_diffs[pool_choices_factor_indices],
pool_choices_tof_msgs_diffs,
temperature,
)
# pool_choices_msgs_diffs above is not numerically stable for the
# pool_choices_diffs_argmaxes. The stable update is derived below
pool_choices_indicators_diffs = pool_choices_tof_msgs_diffs.at[
pool_choices_diffs_argmaxes
].set(-pool_indicators_tof_msgs_diffs)
pool_choices_msgs_diffs_argmaxes = - update_utils.logsumexps_with_temp(
data=pool_choices_indicators_diffs,
labels=pool_choices_factor_indices,
num_labels=num_factors,
temperature=temperature,
)
pool_choices_msgs_diffs = pool_choices_msgs_diffs.at[
pool_choices_diffs_argmaxes
].set(pool_choices_msgs_diffs_argmaxes)
# Special case: factors with a single pool choice
num_pool_choices = jnp.bincount(
pool_choices_factor_indices, length=num_factors
)
first_pool_choices = jnp.concatenate(
[jnp.zeros(1, dtype=int), jnp.cumsum(num_pool_choices)]
)[:-1]
pool_choices_msgs_diffs = pool_choices_msgs_diffs.at[first_pool_choices].set(
jnp.where(
num_pool_choices == 1,
pool_indicators_tof_msgs_diffs,
pool_choices_msgs_diffs[first_pool_choices],
),
)
pool_choices_msgs_ones = pool_choices_msgs_ones.at[first_pool_choices].set(
jnp.where(
num_pool_choices == 1,
pool_indicators_tof_msgs_ones,
pool_choices_msgs_ones[first_pool_choices],
),
)
# Outgoing messages
ftov_msgs = jnp.zeros_like(vtof_msgs)
if normalize:
ftov_msgs = ftov_msgs.at[pool_choices_msg_indices + 1].set(
pool_choices_msgs_diffs
)
ftov_msgs = ftov_msgs.at[pool_indicators_edge_states + 1].set(
pool_indicators_msgs_diffs
)
else:
ftov_msgs = ftov_msgs.at[pool_choices_msg_indices + 1].set(
pool_choices_msgs_ones
)
ftov_msgs = ftov_msgs.at[pool_choices_msg_indices].set(
pool_choices_msgs_ones - pool_choices_msgs_diffs
)
ftov_msgs = ftov_msgs.at[pool_indicators_edge_states + 1].set(
pool_indicators_msgs_zeros + pool_indicators_msgs_diffs
)
ftov_msgs = ftov_msgs.at[pool_indicators_edge_states].set(
pool_indicators_msgs_zeros
)
return ftov_msgs
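# Illustrative sketch, not part of the library: messages for a single
# PoolFactor with one pool choice and one pool indicator (4 edge states in
# total). The hand-written indices below assume the choice's state 0 lives at
# message index 0 and the indicator's state 0 at message index 2; in the
# library these indices come from PoolFactor.compile_wiring.
def _example_pass_pool_fac_to_var_messages():
  vtof_msgs = jnp.array([0.0, 1.0, 0.5, 2.0])
  ftov_msgs = pass_pool_fac_to_var_messages(
      vtof_msgs=vtof_msgs,
      pool_choices_factor_indices=jnp.array([0]),
      pool_choices_msg_indices=jnp.array([0]),
      pool_indicators_edge_states=jnp.array([2]),
      temperature=0.0,
      normalize=True,
  )
  # ftov_msgs == [0.0, 1.5, 0.0, 1.0]: with a single choice the factor ties the
  # choice to the indicator, so each side receives the other's message
  # difference (state 1 minus state 0)
  return ftov_msgs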
| PGMax-main | pgmax/factor/pool.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module containing the base class for variable groups in a Factor Graph."""
import dataclasses
import functools
from typing import Any, List, Tuple, Union
import jax.numpy as jnp
import numpy as np
from pgmax.utils import cached_property
MAX_SIZE = 1e9
@functools.total_ordering
@dataclasses.dataclass(frozen=True, eq=False)
class VarGroup:
"""Class to represent a group of variables.
Each variable is represented via a tuple of the form
(variable hash, variable num_states).
Attributes:
num_states: An integer or an array specifying the number of states of the
variables in this VarGroup
_hash: Hash value
"""
num_states: Union[int, np.ndarray]
_hash: int = dataclasses.field(init=False)
def __post_init__(self):
# Only compute the hash once, which is guaranteed to be an int64
this_id = id(self) % 2**32
this_hash = this_id * int(MAX_SIZE)
assert this_hash < 2**63
object.__setattr__(self, "_hash", this_hash)
def __hash__(self):
return self._hash
def __eq__(self, other):
return hash(self) == hash(other)
def __lt__(self, other):
return hash(self) < hash(other)
def __getitem__(
self,
val: Any,
) -> Union[Tuple[int, int], List[Tuple[int, int]]]:
"""Given a variable name, index, or a group of variable indices, retrieve the associated variable(s).
Each variable is returned via a tuple of the form
(variable hash, variable num_states).
Args:
val: a variable index, slice, or name
Returns:
A single variable or a list of variables
"""
raise NotImplementedError(
"Please subclass the VarGroup class and override this method"
)
@cached_property
def variable_hashes(self) -> np.ndarray:
"""Function that generates a variable hash for each variable.
Returns:
Array of variables hashes.
"""
raise NotImplementedError(
"Please subclass the VarGroup class and override this method"
)
@cached_property
def variables(self) -> List[Tuple[int, int]]:
"""Function that returns the list of all variables in the VarGroup.
Each variable is represented by a tuple of the form
(variable hash, variable num_states).
Returns:
List of variables in the VarGroup
"""
assert isinstance(self.variable_hashes, np.ndarray)
assert isinstance(self.num_states, np.ndarray)
vars_hashes = self.variable_hashes.flatten()
vars_num_states = self.num_states.flatten()
return list(zip(vars_hashes, vars_num_states))
def flatten(self, data: Any) -> jnp.ndarray:
"""Function that turns meaningful structured data into a flat data array for internal use.
Args:
data: Meaningful structured data
Returns:
A flat jnp.array for internal use
"""
raise NotImplementedError(
"Please subclass the VarGroup class and override this method"
)
def unflatten(
self, flat_data: Union[np.ndarray, jnp.ndarray], per_state: bool
) -> Any:
"""Function that recovers meaningful structured data from internal flat data array.
Args:
flat_data: Internal flat data array.
per_state: If True, the provided data is per state, such as beliefs for
every state for every variable. Alternatively, it is considered to be
per variable, such as a MAP decoding.
Returns:
Meaningful structured data
"""
raise NotImplementedError(
"Please subclass the VarGroup class and override this method"
)
| PGMax-main | pgmax/vgroup/vgroup.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module containing a subclass of VarGroup for n-dimensional grids of variables."""
import dataclasses
from typing import List, Tuple, Union
import jax
import jax.numpy as jnp
import numpy as np
from pgmax.utils import cached_property
from pgmax.vgroup import vgroup
@dataclasses.dataclass(frozen=True, eq=False)
class NDVarArray(vgroup.VarGroup):
"""Subclass of VarGroup for n-dimensional grids of variables.
Attributes:
num_states: An integer or an array specifying the number of states of the
variables in this VarGroup
shape: Tuple specifying the size of each dimension of the grid (similar to
the notion of a NumPy ndarray shape)
"""
shape: Tuple[int, ...]
_hash: int = dataclasses.field(init=False)
def __post_init__(self):
super().__post_init__()
max_size = int(vgroup.MAX_SIZE)
if np.prod(self.shape) > max_size:
raise ValueError(
f"Currently only support NDVarArray of size smaller than {max_size}."
f" Got {np.prod(self.shape)}"
)
if np.isscalar(self.num_states):
num_states = np.full(
self.shape,
fill_value=self.num_states,
dtype=np.int64,
)
object.__setattr__(self, "num_states", num_states)
elif isinstance(self.num_states, np.ndarray) and np.issubdtype(
self.num_states.dtype, int
):
if self.num_states.shape != self.shape:
raise ValueError(
f"Expected num_states shape {self.shape}. Got"
f" {self.num_states.shape}."
)
else:
raise ValueError(
"num_states should be an integer or a NumPy array of dtype int"
)
def __repr__(self) -> str:
assert isinstance(self.num_states, np.ndarray)
if self.num_states.size == 0:
num_states_str = f"num_states={self.num_states}"
    else:
min_num_states = self.num_states.min()
max_num_states = self.num_states.max()
if min_num_states == max_num_states:
num_states_str = f"num_states={min_num_states}"
else:
num_states_str = (
f"min_num_states={min_num_states}, max_num_states={max_num_states}"
)
return (
f"NDVarArray(shape={self.shape}, {num_states_str}, _hash={self._hash})"
)
def __getitem__(
self, val: Union[int, slice, Tuple[Union[int, slice], ...]]
) -> Union[Tuple[int, int], List[Tuple[int, int]]]:
"""Given an index or a slice, retrieve the associated variable(s).
Each variable is returned via a tuple of the form
(variable hash, number of states)
    Note: Relies on NumPy indexing to raise an IndexError if val is out of bounds
Args:
val: a variable index or slice
Returns:
A single variable or a list of variables
"""
assert isinstance(self.num_states, np.ndarray)
vars_names = self.variable_hashes[val]
vars_num_states = self.num_states[val]
if isinstance(vars_names, np.ndarray):
vars_names = vars_names.flatten()
vars_num_states = vars_num_states.flatten()
return list(zip(vars_names, vars_num_states))
else:
return (vars_names, vars_num_states)
@cached_property
def variable_hashes(self) -> np.ndarray:
"""Function that generates a variable hash for each variable.
Returns:
Array of variables hashes.
"""
indices = np.reshape(np.arange(np.prod(self.shape)), self.shape)
return self.__hash__() + indices
def flatten(self, data: Union[np.ndarray, jnp.ndarray]) -> jnp.ndarray:
"""Function that turns meaningful structured data into a flat data array for internal use.
Args:
data: Meaningful structured data. Should be an array of shape self.shape
(for e.g. MAP decodings) or self.shape + (self.num_states.max(),) (for
e.g. evidence, beliefs).
Returns:
A flat jnp.array for internal use
Raises:
ValueError: If the data is not of the correct shape.
"""
assert isinstance(self.num_states, np.ndarray)
if data.shape == self.shape:
return jax.device_put(data).flatten()
elif data.shape == self.shape + (self.num_states.max(),):
return jax.device_put(
data[np.arange(data.shape[-1]) < self.num_states[..., None]]
)
else:
raise ValueError(
f"data should be of shape {self.shape} or"
f" {self.shape + (self.num_states.max(),)}. Got {data.shape}."
)
def unflatten(
self, flat_data: Union[np.ndarray, jnp.ndarray], per_state: bool
) -> jnp.ndarray:
"""Function that recovers meaningful structured data from internal flat data array.
Args:
flat_data: Internal flat data array.
per_state: If True, the provided data is per state, such as beliefs for
every state for every variable. Alternatively, it is considered to be
per variable, such as a MAP decoding.
Returns:
Meaningful structured data.
An array of shape self.shape (for e.g. MAP decodings)
or an array of shape self.shape + (self.num_states,) (for e.g. evidence,
beliefs).
Raises:
ValueError if:
(1) flat_data is not a 1D array
(2) flat_data is not of the right shape
"""
assert isinstance(self.num_states, np.ndarray)
if flat_data.ndim != 1:
raise ValueError(
f"Can only unflatten 1D array. Got a {flat_data.ndim}D array."
)
if per_state:
if flat_data.size != self.num_states.sum():
raise ValueError(
"flat_data size should be equal to "
f"{self.num_states.sum()}. Got size {flat_data.size}."
)
data = jnp.full(
shape=self.shape + (self.num_states.max(initial=0),),
fill_value=-jnp.inf,
)
data = data.at[
np.arange(data.shape[-1]) < self.num_states[..., None]
].set(flat_data)
else:
if flat_data.size != np.prod(self.shape):
raise ValueError(
f"flat_data size should be equal to {np.prod(self.shape)}. "
f"Got size {flat_data.size}."
)
data = flat_data.reshape(self.shape)
return data
| PGMax-main | pgmax/vgroup/varray.py |
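A short hypothetical sketch of NDVarArray's flattening with heterogeneous numbers of states, assuming the behavior documented above; the shapes and values are made up.

import numpy as np
from pgmax import vgroup

# Variables with different numbers of states in the same 2x2 array.
num_states = np.array([[2, 3], [3, 2]])
grid = vgroup.NDVarArray(num_states=num_states, shape=(2, 2))

# Per-state data (e.g. beliefs) is padded to num_states.max() before flattening;
# flatten keeps only the valid states, so the flat array has num_states.sum() entries.
beliefs = np.zeros((2, 2, 3))
flat_beliefs = grid.flatten(beliefs)
assert flat_beliefs.shape == (num_states.sum(),)

# unflatten(..., per_state=True) restores the padded (2, 2, 3) layout,
# filling the invalid states with -inf.
padded = grid.unflatten(flat_beliefs, per_state=True)
assert padded.shape == (2, 2, 3)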
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sub-package defining different types of groups of variables."""
from pgmax.vgroup.varray import NDVarArray
from pgmax.vgroup.vdict import VarDict
from pgmax.vgroup.vgroup import VarGroup
| PGMax-main | pgmax/vgroup/__init__.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module containing a variable dictionnary class inheriting from the base VarGroup."""
import dataclasses
from typing import Any, Dict, Hashable, Mapping, Tuple, Union
import jax.numpy as jnp
import numpy as np
from pgmax.utils import cached_property
from pgmax.vgroup import vgroup
@dataclasses.dataclass(frozen=True, eq=False)
class VarDict(vgroup.VarGroup):
"""A variable dictionary that contains a set of variables.
Attributes:
num_states: The size of the variables in this VarGroup
variable_names: A tuple of all the names of the variables in this VarGroup.
"""
variable_names: Tuple[Any, ...]
_hash: int = dataclasses.field(init=False)
def __post_init__(self):
super().__post_init__()
if np.isscalar(self.num_states):
num_states = np.full(
(len(self.variable_names),),
fill_value=self.num_states,
dtype=np.int64,
)
object.__setattr__(self, "num_states", num_states)
elif isinstance(self.num_states, np.ndarray) and np.issubdtype(
self.num_states.dtype, int
):
if self.num_states.shape != (len(self.variable_names),):
raise ValueError(
f"Expected num_states shape ({len(self.variable_names)},). Got"
f" {self.num_states.shape}."
)
else:
raise ValueError(
"num_states should be an integer or a NumPy array of dtype int"
)
@cached_property
def variable_hashes(self) -> np.ndarray:
"""Function that generates a variable hash for each variable.
Returns:
Array of variables hashes.
"""
indices = np.arange(len(self.variable_names))
return self.__hash__() + indices
def __getitem__(self, var_name: Any) -> Tuple[int, int]:
"""Given a variable name retrieve the associated variable, returned via a tuple of the form (variable hash, number of states).
Args:
var_name: a variable name
Returns:
The queried variable
"""
assert isinstance(self.num_states, np.ndarray)
if var_name not in self.variable_names:
raise ValueError(f"Variable {var_name} is not in VarDict")
var_idx = self.variable_names.index(var_name)
return (self.variable_hashes[var_idx], self.num_states[var_idx])
def flatten(
self, data: Mapping[Any, Union[np.ndarray, jnp.ndarray]]
) -> jnp.ndarray:
"""Function that turns meaningful structured data into a flat data array for internal use.
Args:
data: Meaningful structured data. Should be a mapping with names from
self.variable_names. Each value should be an array of shape (1,) (for
e.g. MAP decodings) or (self.num_states,) (for e.g. evidence, beliefs).
Returns:
A flat jnp.array for internal use
Raises:
ValueError if:
(1) data is referring to a non-existing variable
(2) data is not of the correct shape
"""
assert isinstance(self.num_states, np.ndarray)
for var_name in data:
if var_name not in self.variable_names:
raise ValueError(
f"data is referring to a non-existent variable {var_name}."
)
var_idx = self.variable_names.index(var_name)
if data[var_name].shape != (self.num_states[var_idx],) and data[
var_name
].shape != (1,):
raise ValueError(
f"Variable {var_name} expects a data array of shape"
f" {(self.num_states[var_idx],)} or (1,). Got"
f" {data[var_name].shape}."
)
flat_data = jnp.concatenate(
[data[var_name].flatten() for var_name in self.variable_names]
)
return flat_data
def unflatten(
self, flat_data: Union[np.ndarray, jnp.ndarray], per_state: bool
) -> Dict[Hashable, Union[np.ndarray, jnp.ndarray]]:
"""Function that recovers meaningful structured data from internal flat data array.
Args:
flat_data: Internal flat data array.
per_state: If True, the provided data is per state, such as beliefs
for every state for every variable. Alternatively, it is considered to
be per variable, such as a MAP decoding.
Returns:
Meaningful structured data.
Should be a mapping with names from self.variable_names.
Each value should be an array of shape (1,) (for e.g. MAP decodings) or
(self.num_states,) (for e.g. evidence, beliefs) depending on per_state
Raises:
ValueError if:
(1) flat_data is not a 1D array
(2) flat_data is not of the right shape
"""
assert isinstance(self.num_states, np.ndarray)
if flat_data.ndim != 1:
raise ValueError(
f"Can only unflatten 1D array. Got a {flat_data.ndim}D array."
)
num_variables = len(self.variable_names)
num_variable_states = self.num_states.sum()
if per_state:
if flat_data.shape[0] != num_variable_states:
raise ValueError(
"flat_data should be shape "
f"(num_variable_states(={num_variable_states}),). Got "
f"{flat_data.shape}"
)
# Check whether all the variables have the same number of states
if np.all(self.num_states == self.num_states[0]):
data = dict(
zip(self.variable_names, flat_data.reshape(-1, self.num_states[0]))
)
else:
var_states_limits = np.insert(np.cumsum(self.num_states), 0, 0)
var_states = [
flat_data[var_states_limits[idx] : var_states_limits[idx + 1]]
for idx in range(self.num_states.shape[0])
]
data = dict(zip(self.variable_names, var_states))
else:
if flat_data.shape[0] != num_variables:
raise ValueError(
"flat_data should be shape "
f"(num_variables(={len(self.variables)}),). Got "
f"{flat_data.shape}"
)
data = dict(zip(self.variable_names, flat_data))
return data
| PGMax-main | pgmax/vgroup/vdict.py |
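A hypothetical usage sketch for VarDict, assuming the API documented above; the variable names and data are arbitrary.

import numpy as np
from pgmax import vgroup

# Three named binary variables.
switches = vgroup.VarDict(variable_names=("a", "b", "c"), num_states=2)

# Lookup by name returns a (variable hash, num_states) tuple.
var_a = switches["a"]

# flatten expects a mapping from names to per-variable arrays.
beliefs = {name: np.zeros(2) for name in ("a", "b", "c")}
flat = switches.flatten(beliefs)                     # shape (6,)
restored = switches.unflatten(flat, per_state=True)  # each entry has shape (2,)
assert restored["b"].shape == (2,)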
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sub-package containing functions to perform belief propagation."""
from pgmax.infer.bp import BeliefPropagation
from pgmax.infer.bp import BP
from pgmax.infer.bp import get_marginals
from pgmax.infer.bp_state import BPArrays
from pgmax.infer.bp_state import BPState
from pgmax.infer.bp_state import Evidence
from pgmax.infer.bp_state import FToVMessages
from pgmax.infer.bp_state import LogPotentials
from pgmax.infer.dual_lp import SDLP
from pgmax.infer.dual_lp import SmoothDualLP
from pgmax.infer.energy import compute_energy
from pgmax.infer.inferer import decode_map_states
from pgmax.infer.inferer import Inferer
from pgmax.infer.inferer import InfererContext
def build_inferer(bp_state: BPState, backend: str) -> Inferer:
"""Build a supported inferer."""
if backend == "bp":
return BP(bp_state)
elif backend == "sdlp":
return SDLP(bp_state)
else:
raise NotImplementedError(f"Inferer {backend} is not supported.")
| PGMax-main | pgmax/infer/__init__.py |
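A hedged sketch of picking an inference backend with build_inferer; the chain graph, the default PairwiseFactorGroup potentials and the fgraph/fgroup calls are assumptions based on the usual PGMax API and are not defined in this file.

from pgmax import fgraph, fgroup, infer, vgroup

# A small chain of 5 binary variables with uniform pairwise factors.
variables = vgroup.NDVarArray(num_states=2, shape=(5,))
pairwise = fgroup.PairwiseFactorGroup(
    variables_for_factors=[[variables[i], variables[i + 1]] for i in range(4)]
)
fg = fgraph.FactorGraph(variable_groups=[variables])
fg.add_factors(pairwise)

# Both backends expose the shared Inferer interface (init / update / run / get_beliefs).
bp = infer.build_inferer(fg.bp_state, backend="bp")
sdlp = infer.build_inferer(fg.bp_state, backend="sdlp")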
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module containing the core message-passing functions for belief propagation."""
import dataclasses
import functools
from typing import Any, Callable, Dict, Hashable, Optional, Tuple
import warnings
import jax
import jax.numpy as jnp
from jax.scipy.special import logsumexp
from pgmax.factor import FAC_TO_VAR_UPDATES
from pgmax.infer.bp_state import BPArrays
from pgmax.infer.bp_state import BPState
from pgmax.infer.inferer import Inferer
from pgmax.infer.inferer import InfererContext
from pgmax.utils import LOG_POTENTIAL_MAX_ABS
from pgmax.utils import MSG_NEG_INF
from pgmax.utils import NEG_INF
@dataclasses.dataclass(frozen=True, eq=False)
class BeliefPropagation(Inferer):
"""Belief propagation functions.
Attributes:
run_bp: Backward compatible version of run.
run_with_diffs: Run inference while monitoring convergence
"""
run_bp: Callable[..., BPArrays]
run_with_diffs: Callable[..., Tuple[BPArrays, jnp.ndarray]]
# pylint: disable=invalid-name
def BP(
bp_state: BPState, temperature: Optional[float] = 0.0
) -> BeliefPropagation:
"""Returns the generated belief propagation functions.
Args:
bp_state: Belief propagation state.
temperature: Temperature for loopy belief propagation. 1.0 corresponds to
sum-product, 0.0 corresponds to max-product. Used for backward
compatibility
"""
inferer_context = InfererContext(bp_state)
def run_with_diffs(
bp_arrays: BPArrays,
num_iters: int,
damping: float = 0.5,
temperature: float = temperature,
  ) -> Tuple[BPArrays, jnp.ndarray]:
"""Run belief propagation for num_iters with a damping_factor and a temperature.
Args:
bp_arrays: Initial arrays of log_potentials, ftov_msgs, evidence.
num_iters: Number of belief propagation iterations.
damping: The damping factor to use for message updates between one
timestep and the next.
temperature: Temperature for loopy belief propagation. 1.0 corresponds to
sum-product, 0.0 corresponds to max-product.
Returns:
bp_arrays: A BPArrays containing the updated ftov_msgs.
msgs_deltas: The maximum absolute difference between the current and the
updated messages, at each BP iteration.
"""
# Clip the log potentials for numeric stability.
log_potentials = jnp.clip(
bp_arrays.log_potentials, -LOG_POTENTIAL_MAX_ABS, LOG_POTENTIAL_MAX_ABS
)
evidence = bp_arrays.evidence
ftov_msgs = bp_arrays.ftov_msgs
# Normalize and clip the messages
ftov_msgs = normalize_and_clip_msgs(
ftov_msgs,
inferer_context.edge_indices_for_edge_states,
inferer_context.num_edges,
)
@jax.checkpoint
def update(msgs: jnp.ndarray, _) -> Tuple[jnp.ndarray, None]:
# Compute new variable to factor messages by message passing
vtof_msgs = pass_var_to_fac_messages(
msgs,
evidence,
inferer_context.var_states_for_edge_states,
)
ftov_msgs = jnp.zeros_like(vtof_msgs)
for factor_type in FAC_TO_VAR_UPDATES:
msgs_start, msgs_end = inferer_context.factor_type_to_msgs_range[
factor_type
]
potentials_start, potentials_end = (
inferer_context.factor_type_to_potentials_range[factor_type]
)
# Do not update the messages for factor types not present in the graph
if msgs_start != msgs_end:
ftov_msgs_type = FAC_TO_VAR_UPDATES[factor_type](
vtof_msgs=vtof_msgs[msgs_start:msgs_end],
log_potentials=log_potentials[potentials_start:potentials_end],
temperature=temperature,
normalize=True, # BP normalizes the messages
**inferer_context.inference_arguments[factor_type],
)
ftov_msgs = ftov_msgs.at[msgs_start:msgs_end].set(ftov_msgs_type)
# Use the results of message passing to perform damping and
# update the factor to variable messages
new_msgs = damping * msgs + (1 - damping) * ftov_msgs
# Normalize and clip these damped, updated messages before returning them.
new_msgs = normalize_and_clip_msgs(
new_msgs,
inferer_context.edge_indices_for_edge_states,
inferer_context.num_edges,
)
# Monitor message convergence via the maximum absolute difference between
# the current and the updated messages
msgs_delta = jnp.max(jnp.abs(new_msgs - msgs))
return new_msgs, msgs_delta
    # Scan can have significant overhead for a small number of iterations
    # if not JITed. Running one iteration at a time is a common use-case
    # for checking convergence, so specialize that case.
if num_iters > 1:
ftov_msgs, msgs_deltas = jax.lax.scan(update, ftov_msgs, None, num_iters)
else:
ftov_msgs, msgs_delta = update(ftov_msgs, None)
msgs_deltas = msgs_delta[None]
return (
BPArrays(
log_potentials=bp_arrays.log_potentials,
ftov_msgs=ftov_msgs,
evidence=bp_arrays.evidence,
),
msgs_deltas,
)
def run(
bp_arrays: BPArrays,
num_iters: int,
damping: float = 0.5,
temperature: float = temperature,
) -> BPArrays:
"""A wrapper around run_with_diffs to only return the updated BPArrays."""
bp_arrays, _ = run_with_diffs(bp_arrays, num_iters, damping, temperature)
return bp_arrays
def run_bp(
bp_arrays: BPArrays,
num_iters: int,
damping: float = 0.5,
) -> BPArrays:
"""Backward compatible version of run."""
warnings.warn(
"BP.run_bp is deprecated. Please consider using BP.run instead."
)
return run(bp_arrays, num_iters, damping, temperature)
# pylint: disable=unexpected-keyword-arg
bp = BeliefPropagation(
init=inferer_context.init,
update=inferer_context.update,
to_bp_state=inferer_context.to_bp_state,
get_beliefs=inferer_context.get_beliefs,
run=run,
run_bp=run_bp,
run_with_diffs=run_with_diffs,
)
return bp
@jax.jit
def pass_var_to_fac_messages(
ftov_msgs: jnp.ndarray,
evidence: jnp.ndarray,
var_states_for_edge_states: jnp.ndarray,
) -> jnp.ndarray:
"""Pass messages from Variables to Factors.
The update works by first summing the evidence and neighboring factor to
variable messages for each variable.
Next, it subtracts messages from the correct elements of this
sum to yield the correct updated messages.
Args:
ftov_msgs: Array of shape (num_edge_states,). This holds all the flattened
factor to variable messages.
evidence: Array of shape (num_var_states,) representing the flattened
evidence for each variable
var_states_for_edge_states: Array of shape (num_edge_states,).
var_states_for_edges[ii] contains the global variable state index
associated to each edge state
Returns:
Array of shape (num_edge_state,). This holds all the flattened variable
to factor messages.
"""
var_sums_arr = evidence.at[var_states_for_edge_states].add(ftov_msgs)
vtof_msgs = var_sums_arr[var_states_for_edge_states] - ftov_msgs
return vtof_msgs
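# A hypothetical worked example of the variable-to-factor update above
# (all numbers are made up): one binary variable with evidence [0., 1.]
# connected to two factors, so var_states_for_edge_states = [0, 1, 0, 1]
# and ftov_msgs = [.1, .2, .3, .4]. Then
#   var_sums_arr = [0. + .1 + .3, 1. + .2 + .4] = [.4, 1.6]
#   vtof_msgs    = [.4 - .1, 1.6 - .2, .4 - .3, 1.6 - .4] = [.3, 1.4, .1, 1.2]
# i.e. each outgoing message sums the evidence with the incoming messages
# from all the other factors, as described in the docstring.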
@functools.partial(jax.jit, static_argnames="num_edges")
def normalize_and_clip_msgs(
msgs: jnp.ndarray, edge_indices_for_edge_states: jnp.ndarray, num_edges: int
) -> jnp.ndarray:
"""Perform normalization and clipping of flattened messages.
Normalization is done by subtracting the maximum value of every message from
every element of every message,
Clipping keeps every message value in the range [MSG_NEG_INF, 0],
which is equivalent to a noisy channel between each factor and variable
in the specified graph, with a log-odds of flipping equal to MSG_NEG_INF.
Otherwise, in loopy graphs with unsatisfied constraints, messages can tend to
-inf which results in NaNs.
Args:
msgs: Array of shape (num_edge_states,). This holds all the flattened factor
to variable messages.
edge_indices_for_edge_states: Array of shape (num_edge_states,)
edge_indices_for_edge_states[ii] contains the global edge index associated
with the edge state
num_edges: Total number of edges in the factor graph
Returns:
Array of shape (num_edge_states,). This holds all the flattened factor to
variable messages after normalization and clipping
"""
max_by_edges = (
jnp.full(shape=(num_edges,), fill_value=NEG_INF)
.at[edge_indices_for_edge_states]
.max(msgs)
)
norm_msgs = msgs - max_by_edges[edge_indices_for_edge_states]
# Clip message values to be always greater than MSG_NEG_INF
  # This is equivalent to a noisy channel between each factor and variable
# in the specified graph.
new_msgs = jnp.clip(norm_msgs, MSG_NEG_INF, None)
return new_msgs
@jax.jit
def get_marginals(beliefs: Dict[Hashable, Any]) -> Dict[Hashable, Any]:
"""Returns the normalized beliefs of several VarGroups, so that they form a valid probability distribution.
When the temperature is equal to 1.0, get_marginals returns the sum-product
estimate of the marginal probabilities.
When the temperature is equal to 0.0, get_marginals returns the max-product
estimate of the normalized max-marginal probabilities, defined as:
norm_max_marginals(x_i^*) ∝ max_{x: x_i = x_i^*} p(x)
When the temperature is strictly between 0.0 and 1.0, get_marginals returns
the belief propagation estimate of the normalized soft max-marginal
probabilities, defined as:
norm_soft_max_marginals(x_i^*) ∝ (sum_{x: x_i = x_i^*} p(x)^{1 /Temp})^Temp
Args:
beliefs: A dictionary containing the beliefs of the VarGroups.
"""
# pylint: disable=g-long-lambda
return jax.tree_util.tree_map(
lambda x: jnp.exp(x - logsumexp(x, axis=-1, keepdims=True))
if x.size > 0 # Don't need to normalize empty variables
else x,
beliefs,
)
| PGMax-main | pgmax/infer/bp.py |
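A sketch of a typical max-product run with the BP functions above, assuming the chain FactorGraph fg and the variables array from the build_inferer sketch earlier; the evidence values are random placeholders.

import numpy as np
from pgmax import infer

bp = infer.BP(fg.bp_state, temperature=0.0)
bp_arrays = bp.init(
    evidence_updates={variables: np.random.gumbel(size=(5, 2))}
)
# run_with_diffs also returns the maximum absolute message change at each
# iteration, which can be used to monitor convergence.
bp_arrays, msgs_deltas = bp.run_with_diffs(bp_arrays, num_iters=50, damping=0.5)
beliefs = bp.get_beliefs(bp_arrays)
map_states = infer.decode_map_states(beliefs)
marginals = infer.get_marginals(beliefs)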
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute the energy of a MAP decoding."""
from typing import Any, Dict, Hashable, Tuple
import jax.numpy as jnp
import numpy as np
from pgmax import factor
from pgmax import vgroup
from pgmax.factor import FAC_TO_VAR_UPDATES
from pgmax.infer import bp_state as bpstate
from pgmax.infer.bp_state import BPArrays
from pgmax.infer.bp_state import BPState
from pgmax.infer.bp_state import Evidence
def get_vars_to_map_states(
map_states: Dict[Hashable, Any]
) -> Dict[Hashable, Any]:
"""Maps each variable of a FactorGraph to its MAP state.
Args:
map_states: A dictionary mapping each VarGroup to the MAP states of all its
variables.
Returns:
A dictionary mapping each individual variable to its MAP state.
"""
vars_to_map_states = {}
for variable_group, vg_map_states in map_states.items():
if np.prod(variable_group.shape) == 0: # Skip empty variable groups
continue
vg_map_states_flat = variable_group.flatten(vg_map_states)
vars_to_map_states.update(
zip(variable_group.variables, list(np.array(vg_map_states_flat)))
)
return vars_to_map_states
def compute_energy(
bp_state: BPState,
bp_arrays: BPArrays,
map_states: Dict[Hashable, Any],
debug_mode=False,
) -> Tuple[float, Any, Any]:
"""Return the energy of a decoding, expressed by its MAP states.
Args:
bp_state: Belief propagation state
bp_arrays: Arrays of log_potentials, ftov_msgs, evidence
map_states: A dictionary mapping the VarGroups of the FactorGraph to their
MAP states
debug_mode: Debug mode returns the individual energies of each variable and
factor in the FactorGraph
Returns:
energy: The energy of the decoding
vars_energies: The energy of each individual variable (only in debug mode)
factors_energies: The energy of each individual factor (only in debug mode)
Note: Remember that the lower the energy, the better the decoding!
"""
if debug_mode:
return _compute_energy_debug_mode(bp_state, bp_arrays, map_states)
wiring = bp_state.fg_state.wiring
factor_type_to_potentials_range = (
bp_state.fg_state.factor_type_to_potentials_range
)
factor_type_to_msgs_range = bp_state.fg_state.factor_type_to_msgs_range
# Add offsets to the edges and factors indices of var_states_for_edges
var_states_for_edges = factor.concatenate_var_states_for_edges(
[
wiring[factor_type].var_states_for_edges
for factor_type in FAC_TO_VAR_UPDATES
]
)
log_potentials = bp_arrays.log_potentials
evidence = bp_arrays.evidence
  # Inference arguments per factor type
inference_arguments = {}
for factor_type in FAC_TO_VAR_UPDATES:
this_inference_arguments = wiring[factor_type].get_inference_arguments()
inference_arguments[factor_type] = this_inference_arguments
# Step 1: compute the contribution of all the variables to the energy
# Represent the decoding of each variable groups via a one-hot vector
vgroups_one_hot_decoding = {}
for variable_group in bp_state.fg_state.variable_groups:
if variable_group.num_states.size == 0:
continue
# VarDict will soon inherit from NDVarArray
assert isinstance(variable_group, vgroup.NDVarArray)
vgroup_decoded = map_states[variable_group]
vgroup_one_hot_decoding = jnp.zeros(
shape=vgroup_decoded.shape + (variable_group.num_states.max(),)
)
dims = [np.arange(dim) for dim in variable_group.shape]
meshgrid = jnp.meshgrid(*dims, indexing="ij")
vgroup_one_hot_decoding = vgroup_one_hot_decoding.at[
tuple(meshgrid) + (vgroup_decoded,)
].set(1.0)
vgroups_one_hot_decoding[variable_group] = vgroup_one_hot_decoding
# Flatten the one-hot decoding
var_states_one_hot_decoding = bpstate.update_evidence(
jnp.zeros_like(evidence),
vgroups_one_hot_decoding,
bp_state.fg_state,
)
energy = -jnp.sum(var_states_one_hot_decoding * evidence)
# Step 2: compute the contribution of each factor type to the energy
# Extract the one-hot decoding of all the edge states
edge_states_one_hot_decoding = var_states_one_hot_decoding[
var_states_for_edges[..., 0]
]
for factor_type in FAC_TO_VAR_UPDATES:
msgs_start, msgs_end = factor_type_to_msgs_range[factor_type]
potentials_start, potentials_end = factor_type_to_potentials_range[
factor_type
]
# Do not compute the energy for factor types not present in the graph
if msgs_start != msgs_end:
energy += factor_type.compute_energy(
edge_states_one_hot_decoding=edge_states_one_hot_decoding[
msgs_start:msgs_end
],
log_potentials=log_potentials[potentials_start:potentials_end],
**inference_arguments[factor_type],
)
return energy, None, None
def _compute_energy_debug_mode(
bp_state: BPState,
bp_arrays: BPArrays,
map_states: Dict[Hashable, Any],
) -> Tuple[float, Any, Any]:
"""Return the energy of a decoding, expressed by its MAP states, as well as the energies of each variable and factor.
Args:
bp_state: Belief propagation state
bp_arrays: Arrays of log_potentials, ftov_msgs, evidence
map_states: A dictionary mapping the VarGroups of the FactorGraph to their
MAP states
Returns:
energy: The energy of the decoding
vars_energies: The energy of each individual variable (only in debug mode)
factors_energies: The energy of each individual factor (only in debug mode)
"""
print("Computing the energy of a decoding in debug mode is slow...")
# Outputs
energy = 0.0
vars_energies = {}
factors_energies = {}
# Map each variable to its MAP state
vars_to_map_states = get_vars_to_map_states(map_states)
# Step 1: compute the contribution of each variable to the energy
evidence = Evidence(bp_state.fg_state, value=np.array(bp_arrays.evidence))
for variable_group in bp_state.fg_state.variable_groups:
for var in variable_group.variables:
var_decoded_state = int(vars_to_map_states[var])
var_energy = -float(evidence[var][var_decoded_state])
vars_energies[var] = var_energy
energy += var_energy
# Step 2: compute the contribution of each factor to the energy
for factor_group in bp_state.fg_state.factor_group_to_potentials_starts:
# All the factors in a FactorGroup share the same configurations
factor_configs = factor_group.factor_configs
for this_factor in factor_group.factors:
this_factor_variables = this_factor.variables
factor_energy = factor_group.factor_type.compute_factor_energy(
variables=this_factor_variables,
vars_to_map_states=vars_to_map_states,
factor_configs=factor_configs,
log_potentials=this_factor.log_potentials,
)
energy += factor_energy
factors_energies[frozenset(this_factor_variables)] = factor_energy
return energy, vars_energies, factors_energies
| PGMax-main | pgmax/infer/energy.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module solving the smoothed dual of the LP relaxation of the MAP problem."""
import dataclasses
import functools
from typing import Any, Callable, Dict, Hashable, Optional, Tuple
import jax
import jax.numpy as jnp
from pgmax import infer
from pgmax.factor import FAC_TO_VAR_UPDATES
from pgmax.factor import update_utils
from pgmax.infer.bp_state import BPArrays
from pgmax.infer.bp_state import BPState
from pgmax.infer.inferer import Inferer
from pgmax.infer.inferer import InfererContext
from pgmax.utils import NEG_INF
@dataclasses.dataclass(frozen=True, eq=False)
class SmoothDualLP(Inferer):
"""Smooth Dual LP-MAP solver functions.
Attributes:
run_with_objvals: Solves the Smooth Dual LP-MAP problem via accelerated
gradient descent (or subgradient descent) and returns the objective value
at each step.
decode_primal_unaries: Decodes the primal LP-MAP unaries and returns a state
assignment for each variable of the FactorGraph.
get_primal_upper_bound: Returns an upper bound of the optimal objective
value of the (non smooth) LP-MAP problem.
get_map_lower_bound: Returns a lower bound of the optimal objective value of
the (Integer Programming) MAP problem.
    get_bp_updates: Used for unit tests. Gets the BP updates involved in the
      SDLP solver.
"""
run_with_objvals: Callable[..., Tuple[BPArrays, float]]
decode_primal_unaries: Callable[
..., Tuple[Dict[Hashable, Any], Dict[Hashable, Any]]
]
get_primal_upper_bound: Callable[..., float]
get_map_lower_bound: Callable[..., float]
  get_bp_updates: Callable[..., Tuple[jnp.ndarray, jnp.ndarray]]
# pylint: disable=invalid-name
def SDLP(bp_state: BPState) -> SmoothDualLP:
"""Returns the generated Smooth Dual LP-MAP functions.
Args:
bp_state: Belief propagation state.
"""
inferer_context = InfererContext(bp_state)
def smooth_dual_objval_and_grad(
ftov_msgs: jnp.ndarray,
log_potentials: jnp.ndarray,
evidence: jnp.ndarray,
logsumexp_temp: float,
) -> Tuple[float, jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Computes the Smooth Dual LP-MAP objective value and its closed-form gradient with respect to the dual messages.
Note 1: The Smooth Dual LP-MAP problem is introduced in
https://papers.nips.cc/paper_files/paper/2012/file/bad5f33780c42f2588878a9d07405083-Paper.pdf
Note 2: To compute the closed-form gradient of the Smooth Dual LP-MAP
objective value, we reuse the Belief Propagation message updates at the
temperature equal to logsumexp_temp.
Note 3: Here, ftov_msgs are the dual LP-MAP "messages" (see Section
1.3 in https://people.csail.mit.edu/dsontag/papers/SonGloJaa_optbook.pdf)
which have a different semantic than the Belief Propagation "messages".
Args:
ftov_msgs: Flat array of dual LP-MAP messages from factors to variables
log_potentials: Flat array of log potentials
evidence: Array of shape (num_var_states,) containing the flattened
evidence for each variable
logsumexp_temp: Temperature for the log sum-exp function, which controls
how well the objective value of the Smooth Dual LP-MAP approximates the
Dual LP-MAP one. When logsumexp_temp=0.0, it computes the subgradient of
the objective value.
Returns:
objval: Smooth Dual LP-MAP objective value
grad_ftov_msgs: Closed-form gradient of the Smooth Dual objective value
with respect to the dual messages.
When logsumexp_temp=0.0, it is the subgradient of the objective value.
bp_ftov_msgs_updates: Belief Propagation message updates, used to compute
the gradient of the Smooth Dual objective value.
This quantity is only used for unit tests
logsumexp_fac_configs_w_reps_at_edges: At each edge, this contains the log
sum-exp of the outgoing messages over all the factor configs for the
(unique) factor that this edge connects to.
This quantity is only used for unit tests
"""
var_sums_arr = evidence.at[inferer_context.var_states_for_edge_states].add(
ftov_msgs
)
# BP message updates
bp_ftov_msgs_updates = jnp.zeros_like(ftov_msgs)
for factor_type in FAC_TO_VAR_UPDATES:
msgs_start, msgs_end = inferer_context.factor_type_to_msgs_range[
factor_type
]
potentials_start, potentials_end = (
inferer_context.factor_type_to_potentials_range[factor_type]
)
# Do not update the messages for factor types not present in the graph
if msgs_start != msgs_end:
this_bp_ftov_msgs_updates = FAC_TO_VAR_UPDATES[factor_type](
vtof_msgs=-ftov_msgs[msgs_start:msgs_end],
log_potentials=log_potentials[potentials_start:potentials_end],
temperature=logsumexp_temp,
normalize=False, # SDLP does not normalize the messages
**inferer_context.inference_arguments[factor_type],
)
bp_ftov_msgs_updates = bp_ftov_msgs_updates.at[msgs_start:msgs_end].set(
this_bp_ftov_msgs_updates
)
if logsumexp_temp == 0.0:
# Compute the subgradient of the loss w.r.t. the dual messages
# First compute the contribution of the variables to the subgradient
maxes_var_states, argmaxes_var_states = (
update_utils.get_maxes_and_argmaxes(
var_sums_arr,
inferer_context.evidence_to_vars,
inferer_context.num_variables,
)
)
num_var_states = evidence.shape[0]
subgrad_ftov_msgs_plus = jnp.zeros(shape=(num_var_states,))
subgrad_ftov_msgs_plus = subgrad_ftov_msgs_plus.at[
argmaxes_var_states
].set(1.0)
# Second, compute the contribution of the factors to the subgradient
# maxes_fac_configs_w_reps_at_edges are the same at each edge of a factor
(
maxes_fac_configs_w_reps_at_edges,
argmaxes_fac_configs_by_edges,
) = update_utils.get_maxes_and_argmaxes(
bp_ftov_msgs_updates - ftov_msgs,
inferer_context.edge_indices_for_edge_states,
inferer_context.num_edges,
)
num_edge_states = inferer_context.var_states_for_edge_states.shape[0]
subgrad_ftov_msgs_minus = jnp.zeros(shape=(num_edge_states,))
subgrad_ftov_msgs_minus = subgrad_ftov_msgs_minus.at[
argmaxes_fac_configs_by_edges
].set(-1.0)
subgrad_ftov_msgs = subgrad_ftov_msgs_plus[
inferer_context.var_states_for_edge_states
] + subgrad_ftov_msgs_minus
# Finally, compute the Dual LP-MAP objective value
maxes_fac_configs = (
jnp.full(shape=(inferer_context.num_factors,), fill_value=NEG_INF)
.at[inferer_context.factor_indices_for_edge_states]
.max(
maxes_fac_configs_w_reps_at_edges[
inferer_context.edge_indices_for_edge_states
]
)
)
objval = jnp.sum(maxes_var_states) + jnp.sum(maxes_fac_configs)
return (
objval,
subgrad_ftov_msgs,
bp_ftov_msgs_updates,
maxes_fac_configs_w_reps_at_edges,
)
else:
# Get the stable softmax and logsumexp of the evidence for each variable
(
softmax_var_states,
logsumexp_var_states
) = update_utils.softmax_and_logsumexps_with_temp(
data=var_sums_arr,
labels=inferer_context.evidence_to_vars,
num_labels=inferer_context.num_variables,
temperature=logsumexp_temp,
)
# Get the stable softmax and logsumexp of the messages for each factor
# logsumexp_fac_configs_w_reps_at_edges are equal at each edge of a factor
(
sum_by_edge_state_softmax_fac_configs,
logsumexp_fac_configs_w_reps_at_edges,
) = update_utils.softmax_and_logsumexps_with_temp(
data=bp_ftov_msgs_updates - ftov_msgs,
labels=inferer_context.edge_indices_for_edge_states,
num_labels=inferer_context.num_edges,
temperature=logsumexp_temp,
)
# Compute the closed-form gradient of the loss w.r.t the dual messages
grad_ftov_msgs = (
softmax_var_states[inferer_context.var_states_for_edge_states]
- sum_by_edge_state_softmax_fac_configs
)
# Finally, compute the Smooth Dual LP-MAP objective value
logsumexp_fac_configs = (
jnp.full(shape=(inferer_context.num_factors,), fill_value=NEG_INF)
.at[inferer_context.factor_indices_for_edge_states]
.max(
logsumexp_fac_configs_w_reps_at_edges[
inferer_context.edge_indices_for_edge_states
]
)
)
objval = jnp.sum(logsumexp_var_states) + jnp.sum(logsumexp_fac_configs)
return (
objval,
grad_ftov_msgs,
bp_ftov_msgs_updates,
logsumexp_fac_configs_w_reps_at_edges,
)
@functools.partial(
jax.jit, static_argnames=("logsumexp_temp", "num_iters", "lr")
)
def run_with_objvals(
sdlp_arrays: BPArrays,
logsumexp_temp: float,
num_iters: int,
lr: Optional[float] = None,
) -> Tuple[BPArrays, float]:
"""Solves the Smooth Dual LP-MAP problem via accelerated gradient descent (or subgradient descent) and returns the objective value at each step.
Args:
sdlp_arrays: A BPArrays containing log_potentials, LP-MAP ftov_msgs,
evidence
logsumexp_temp: Temperature for the log sum-exp function, which controls
how well the objective value of the Smooth Dual LP-MAP approximates the
Dual LP-MAP one
num_iters: Number of (sub)gradient descent iterations
lr: Optional learning rate. If None, default learning rate is set to
logsumexp_temp for gradient descent and 0.01 for subgradient descent.
Returns:
sdlp_arrays: A BPArrays containing the updated LP-MAP ftov_msgs
objvals: The list of Smooth Dual LP-MAP objective values at each step
Raises: ValueError if
(1) logsumexp_temp is not between 0.0 and 1.0
(2) logsumexp_temp > 0.0 and lr > logsumexp_temp
"""
if logsumexp_temp < 0.0 or logsumexp_temp > 1.0:
raise ValueError(
"The log sum-exp temperature of the Dual LP-MAP solver has to be"
" between 0.0 and 1.0"
)
if logsumexp_temp != 0.0 and lr is not None and lr > logsumexp_temp:
raise ValueError(
"For gradient descent, the learning rate must be smaller than the"
" log sum-exp temperature."
)
if lr is None:
if logsumexp_temp != 0.0:
lr = logsumexp_temp
else:
lr = 0.01
log_potentials = sdlp_arrays.log_potentials
evidence = sdlp_arrays.evidence
ftov_msgs = sdlp_arrays.ftov_msgs
eta = jnp.copy(ftov_msgs)
def gradient_descent_update(msgs_eta, it):
"""Runs one step of accelerated gradient descent and updates the dual messages."""
ftov_msgs, eta = msgs_eta
# Compute the objective value and the gradient w.r.t the dual messages
objval, msgs_grads, _, _ = smooth_dual_objval_and_grad(
ftov_msgs,
log_potentials,
evidence,
logsumexp_temp=logsumexp_temp,
)
# Accelerated gradient descent
if logsumexp_temp > 0:
# Log-sum-exp has a Lipschitz gradient with constant 1 / logsumexp_temp
# https://arxiv.org/pdf/1704.00805.pdf, Prop. 4
new_eta = ftov_msgs - lr * msgs_grads
# Nesterov's acceleration
new_ftov_msgs = new_eta + (it + 1.0) / (it + 4.0) * (new_eta - eta)
# Subgradient descent
else:
new_eta = ftov_msgs - lr / jnp.sqrt(it + 1.0) * msgs_grads
# Empirically found to accelerate convergence
new_ftov_msgs = new_eta + (it + 1.0) / (it + 4.0) * (new_eta - eta)
return (new_ftov_msgs, new_eta), objval
(ftov_msgs, eta), objvals = jax.lax.scan(
gradient_descent_update, (ftov_msgs, eta), jnp.arange(num_iters)
)
sdlp_arrays = BPArrays(
log_potentials=log_potentials, ftov_msgs=ftov_msgs, evidence=evidence
)
return sdlp_arrays, objvals
def run(
sdlp_arrays: BPArrays,
logsumexp_temp: float,
num_iters: int,
lr: Optional[float] = None,
) -> BPArrays:
"""A wrapper around run_with_objvals to only return the updated BPArrays."""
sdlp_arrays, _ = run_with_objvals(
sdlp_arrays=sdlp_arrays,
logsumexp_temp=logsumexp_temp,
num_iters=num_iters,
lr=lr,
)
return sdlp_arrays
def decode_primal_unaries(
sdlp_arrays: BPArrays,
) -> Tuple[Dict[Hashable, Any], Dict[Hashable, Any]]:
"""Decodes the primal unaries and returns a state assignment for each variable of the FactorGraph.
This implements the local decoding suggested in Section 1.7 of
https://people.csail.mit.edu/dsontag/papers/SonGloJaa_optbook.pdf
Note: We do not decode the primal factor binary variables. Also note that
the decoded factor binaries would not be guaranteed to be consistent with
the decoded unaries when we are not at a minimum of the Dual LP-MAP problem.
Args:
sdlp_arrays: A BPArrays containing log_potentials, LP-MAP ftov_msgs,
evidence
Returns:
decoded_primal_unaries: The primal unaries decoded from the SDLP solution
dual_beliefs: The associated dual beliefs
"""
dual_beliefs = inferer_context.get_beliefs(sdlp_arrays)
decoded_primal_unaries = infer.decode_map_states(dual_beliefs)
return decoded_primal_unaries, dual_beliefs
def get_primal_upper_bound(sdlp_arrays: BPArrays) -> float:
"""Returns an upper bound of the optimal objective value of the (non smooth) LP-MAP problem.
Args:
sdlp_arrays: A BPArrays containing log_potentials, LP-MAP ftov_msgs,
evidence
"""
return smooth_dual_objval_and_grad(
sdlp_arrays.ftov_msgs,
sdlp_arrays.log_potentials,
sdlp_arrays.evidence,
logsumexp_temp=0.0,
)[0]
def get_map_lower_bound(
sdlp_arrays: BPArrays,
decoded_primal_unaries: Dict[Hashable, Any],
debug_mode=False,
) -> float:
"""Given decoded primal unaries, returns a lower bound of the optimal objective value of the (Integer Programming) MAP problem.
Note 1: This lower bound is the opposite of the energy of the decoding.
Note 2: This lower bound is also a lower bound of the optimal objective
value of the relaxed LP-MAP problem.
A tighter lower bound could be derived for the LP-MAP problem, but it would
first require to map the dual messages to a feasible solution for the primal
of the LP-MAP problem. See Algorithm 1 in
https://papers.nips.cc/paper_files/paper/2012/file/bad5f33780c42f2588878a9d07405083-Paper.pdf
Note 3: If the MAP lower bound and the LP-MAP upper bound returned by
get_primal_upper_bound are equal, then strong duality theorem guarantees
that the LP relaxation is tight and that we are at the optimal MAP solution.
Args:
sdlp_arrays: A BPArrays containing log_potentials, LP-MAP ftov_msgs,
evidence
decoded_primal_unaries: The primal unaries decoded from the smooth dual
solution.
      debug_mode: Debug mode gives access to the individual contributions of
        each variable and factor
Returns:
A lower bound of the optimal objective value of the MAP problem
"""
return -infer.compute_energy(
bp_state=bp_state,
bp_arrays=sdlp_arrays,
map_states=decoded_primal_unaries,
debug_mode=debug_mode,
)[0]
def get_bp_updates(
sdlp_arrays: BPArrays, logsumexp_temp: float
  ) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Returns the BP updates involved in the Dual solver, for unit tests.
Args:
sdlp_arrays: A BPArrays containing log_potentials, ftov_msgs, evidence.
logsumexp_temp: Temperature for the log sum-exp function, which controls
how well the Smooth Dual LP-MAP objective value approximates the
Dual LP-MAP objective value
Returns:
bp_ftov_msgs_updates: Belief Propagation message updates used to compute
the Smooth Dual gradients
logsumexp_fac_configs_w_reps_at_edges: At each edge, this contains the log
sum exp of the outgoing messages over all the factor configs for the
(unique) factor that this edge connects to.
"""
(
_,
_,
bp_ftov_msgs_updates,
logsumexp_fac_configs_w_reps_at_edges,
) = smooth_dual_objval_and_grad(
sdlp_arrays.ftov_msgs,
sdlp_arrays.log_potentials,
sdlp_arrays.evidence,
logsumexp_temp=logsumexp_temp,
)
return (
bp_ftov_msgs_updates,
logsumexp_fac_configs_w_reps_at_edges,
)
bp = SmoothDualLP(
init=inferer_context.init,
update=inferer_context.update,
to_bp_state=inferer_context.to_bp_state,
get_beliefs=inferer_context.get_beliefs,
run=run,
run_with_objvals=run_with_objvals,
decode_primal_unaries=decode_primal_unaries,
get_primal_upper_bound=get_primal_upper_bound,
get_map_lower_bound=get_map_lower_bound,
get_bp_updates=get_bp_updates,
)
return bp
| PGMax-main | pgmax/infer/dual_lp.py |
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module defining container classes for belief propagation states."""
import dataclasses
import functools
from typing import Any, Dict, Optional, Tuple, Union, cast
import jax
import jax.numpy as jnp
import numpy as np
from pgmax import fgraph
from pgmax import fgroup
from pgmax.utils import NEG_INF
@jax.tree_util.register_pytree_node_class
@dataclasses.dataclass(frozen=True, eq=False)
class BPArrays:
"""Container for the relevant flat arrays used in belief propagation.
Attributes:
log_potentials: Flat log potentials array.
ftov_msgs: Flat factor to variable messages array.
evidence: Flat evidence array.
"""
log_potentials: Union[np.ndarray, jnp.ndarray]
ftov_msgs: Union[np.ndarray, jnp.ndarray]
evidence: Union[np.ndarray, jnp.ndarray]
def __post_init__(self):
for field in self.__dataclass_fields__:
if isinstance(getattr(self, field), np.ndarray):
getattr(self, field).flags.writeable = False
def tree_flatten(self):
return jax.tree_util.tree_flatten(dataclasses.asdict(self))
@classmethod
def tree_unflatten(cls, aux_data, children):
return cls(**aux_data.unflatten(children))
@functools.partial(jax.jit, static_argnames="fg_state")
def update_log_potentials(
log_potentials: jnp.ndarray,
updates: Dict[Any, Union[np.ndarray, jnp.ndarray]],
fg_state: fgraph.FactorGraphState,
) -> jnp.ndarray:
"""Returns new log_potentials with updates for a set of factor groups.
Args:
log_potentials: A flat jnp array containing log_potentials.
updates: A dictionary containing updates for log_potentials
fg_state: Factor graph state
Returns:
A flat jnp array containing updated log_potentials.
Raises: ValueError if
(1) Provided log_potentials shape does not match the expected
log_potentials shape.
(2) Provided name is not valid for log_potentials updates.
"""
# Copy to device and clip updates to not have infinite values
updates = jax.tree_util.tree_map(
lambda x: jnp.clip(jax.device_put(x), NEG_INF, -NEG_INF), updates
)
for factor_group, data in updates.items():
if factor_group in fg_state.factor_group_to_potentials_starts:
flat_data = factor_group.flatten(data)
if flat_data.shape != factor_group.factor_group_log_potentials.shape:
raise ValueError(
"Expected log potentials shape"
f" {factor_group.factor_group_log_potentials.shape} for"
f" factor group. Got incompatible data shape {data.shape}."
)
start = fg_state.factor_group_to_potentials_starts[factor_group]
log_potentials = log_potentials.at[
start : start + flat_data.shape[0]
].set(flat_data)
else:
raise ValueError("Invalid FactorGroup for log potentials updates.")
return log_potentials
@dataclasses.dataclass(frozen=True, eq=False)
class LogPotentials:
"""Class for storing and manipulating log potentials.
Attributes:
fg_state: Factor graph state
value: Optionally specify an initial value
Raises: ValueError if provided value shape does not match the expected
log_potentials shape.
"""
fg_state: fgraph.FactorGraphState
value: Optional[np.ndarray] = None
def __post_init__(self):
if self.value is None:
object.__setattr__(self, "value", self.fg_state.log_potentials)
else:
if self.value.shape != self.fg_state.log_potentials.shape:
raise ValueError(
"Expected log potentials shape"
f" {self.fg_state.log_potentials.shape}. Got {self.value.shape}."
)
object.__setattr__(self, "value", self.value)
def __getitem__(self, factor_group: fgroup.FactorGroup) -> np.ndarray:
"""Function to query log potentials for a FactorGroup.
Args:
factor_group: Queried FactorGroup
Returns:
The queried log potentials.
"""
value = cast(np.ndarray, self.value)
if factor_group in self.fg_state.factor_group_to_potentials_starts:
start = self.fg_state.factor_group_to_potentials_starts[factor_group]
log_potentials = value[
start : start + factor_group.factor_group_log_potentials.shape[0]
]
else:
raise ValueError("Invalid FactorGroup queried to access log potentials.")
return log_potentials
def __setitem__(
self,
factor_group: fgroup.FactorGroup,
data: Union[np.ndarray, jnp.ndarray],
):
"""Set the log potentials for a FactorGroup.
Args:
factor_group: FactorGroup
data: Array containing the log potentials for the FactorGroup
"""
object.__setattr__(
self,
"value",
np.asarray(
update_log_potentials(
jax.device_put(self.value),
{factor_group: jax.device_put(data)},
self.fg_state,
)
),
)
@functools.partial(jax.jit, static_argnames="fg_state")
def update_ftov_msgs(
ftov_msgs: jnp.ndarray,
updates: Dict[Any, Union[np.ndarray, jnp.ndarray]],
fg_state: fgraph.FactorGraphState,
) -> jnp.ndarray:
"""Returns new ftov_msgs with updates for a set of variables or factor types.
Args:
ftov_msgs: A flat jnp array containing ftov_msgs.
updates: A dictionary mapping the variables or the factor types to their
ftov_msgs updates
fg_state: Factor graph state
Returns:
A flat jnp array containing updated ftov_msgs.
Raises: ValueError if:
(1) provided ftov_msgs shape does not match the expected ftov_msgs shape.
(2) provided variable or factor type is not in the FactorGraph.
"""
# Copy to device and clip updates to not have infinite values
updates = jax.tree_util.tree_map(
lambda x: jnp.clip(jax.device_put(x), NEG_INF, -NEG_INF), updates
)
for name, data in updates.items():
if name in fg_state.factor_type_to_msgs_range:
msgs_start, msgs_end = fg_state.factor_type_to_msgs_range[name]
if data.shape != (msgs_end - msgs_start,):
raise ValueError(
f"Expected ftov_msgs shape {(msgs_end - msgs_start,)}"
f" for factor type {name}. Got incompatible shape {data.shape}."
)
ftov_msgs = ftov_msgs.at[msgs_start:msgs_end].set(data)
elif name in fg_state.vars_to_starts:
num_var_states = name[1]
if data.shape != (num_var_states,):
raise ValueError(
f"Expected ftov_msgs shape {(num_var_states,)} for variable {name}."
f" Got incompatible shape {data.shape}."
)
var_states_for_edge_states = np.concatenate(
[
wiring_by_type.var_states_for_edges[..., 0]
for wiring_by_type in fg_state.wiring.values()
]
)
starts = np.nonzero(
var_states_for_edge_states == fg_state.vars_to_starts[name]
)[0]
for start in starts:
ftov_msgs = ftov_msgs.at[start : start + name[1]].set(
data / starts.shape[0]
)
else:
raise ValueError(
"Provided variable or factor type is not in the FactorGraph"
)
return ftov_msgs
@dataclasses.dataclass(frozen=True, eq=False)
class FToVMessages:
"""Class for storing and manipulating factor to variable messages.
Attributes:
fg_state: Factor graph state
value: Optionally specify initial value for ftov messages
Raises: ValueError if provided value does not match expected ftov messages
shape.
"""
fg_state: fgraph.FactorGraphState
value: Optional[np.ndarray] = None
def __post_init__(self):
if self.value is None:
object.__setattr__(
self, "value", np.zeros(self.fg_state.total_factor_num_states)
)
else:
if self.value.shape != (self.fg_state.total_factor_num_states,):
raise ValueError(
"Expected messages shape"
f" {(self.fg_state.total_factor_num_states,)}. Got"
f" {self.value.shape}."
)
object.__setattr__(self, "value", self.value)
def __setitem__(
self,
variable: Tuple[int, int],
data: Union[np.ndarray, jnp.ndarray],
) -> None:
"""Spreading beliefs at a variable to all connected Factors.
Args:
variable: Variable queried
data: An array containing the beliefs to be spread uniformly across all
factors to variable messages involving this variable.
"""
object.__setattr__(
self,
"value",
np.asarray(
update_ftov_msgs(
jax.device_put(self.value),
{variable: jax.device_put(data)},
self.fg_state,
)
),
)
@functools.partial(jax.jit, static_argnames="fg_state")
def update_evidence(
evidence: jnp.ndarray,
updates: Dict[Any, Union[np.ndarray, jnp.ndarray]],
fg_state: fgraph.FactorGraphState,
) -> jnp.ndarray:
"""Returns new evidence with updates for a set of variables or variables groups.
Args:
evidence: A flat jnp array containing evidence.
updates: A dictionary mapping variables or VarGroups to their updated
evidence values.
fg_state: Factor graph state
Returns:
A flat jnp array containing the updated evidence values.
Raises: ValueError if
(1) The provided evidence shape does not match the expected one
(2) The provided variable or VarGroup is not in the Factorgraph
"""
# Copy to device and clip updates to not have infinite values
updates = jax.tree_util.tree_map(
lambda x: jnp.clip(jax.device_put(x), NEG_INF, -NEG_INF), updates
)
for name, data in updates.items():
# Name is a variable_group or a variable
if name in fg_state.variable_groups:
# The evidence will only be flattened if it is of the expected size
flat_data = name.flatten(data)
first_variable = name.variables[0]
start_index = fg_state.vars_to_starts[first_variable]
evidence = evidence.at[
start_index : start_index + flat_data.shape[0]
].set(flat_data)
elif name in fg_state.vars_to_starts:
start_index = fg_state.vars_to_starts[name]
var_num_states = name[1]
if data.shape != (var_num_states,):
raise ValueError(
f"Expected evidence shape {(var_num_states,)} for variable {name}."
f" Got incompatible shape {data.shape}."
)
evidence = evidence.at[start_index : start_index + name[1]].set(data)
else:
raise ValueError(
"Got evidence for a variable or a VarGroup not in the FactorGraph!"
)
return evidence
@dataclasses.dataclass(frozen=True, eq=False)
class Evidence:
"""Class for storing and manipulating evidence.
Attributes:
fg_state: Factor graph state
value: Optionally specify initial value for evidence
Raises: ValueError if provided value does not match expected evidence shape.
"""
fg_state: fgraph.FactorGraphState
value: Optional[np.ndarray] = None
def __post_init__(self):
if self.value is None:
value = np.zeros((self.fg_state.num_var_states,))
object.__setattr__(self, "value", value)
else:
if self.value.shape != (self.fg_state.num_var_states,):
raise ValueError(
f"Expected evidence shape {(self.fg_state.num_var_states,)}. "
f"Got {self.value.shape}."
)
def __getitem__(self, variable: Tuple[int, int]) -> np.ndarray:
"""Returns the evidence of a queried variable.
Args:
variable: Variable queried
"""
value = cast(np.ndarray, self.value)
start = self.fg_state.vars_to_starts[variable]
evidence = value[start : start + variable[1]]
return evidence
def __setitem__(
self,
name: Any,
data: np.ndarray,
) -> None:
"""Updates the evidence of a variable group or of a variable.
Args:
name: The name of a variable group or a single variable. If name is the
name of a variable group, updates are derived by using the variable
group to flatten the data. If name is the name of a variable, data
        should be an array of shape (num_states,). If name is None, updates are
derived by using self.fg_state.variable_groups to flatten the data.
data: Array containing the evidence updates.
"""
object.__setattr__(
self,
"value",
np.asarray(
update_evidence(
jax.device_put(self.value),
{name: jax.device_put(data)},
self.fg_state,
),
),
)
@dataclasses.dataclass(frozen=True, eq=False)
class BPState:
"""Container class for belief propagation states, including log potentials, ftov messages and evidence (unary log potentials).
Attributes:
log_potentials: log potentials of the model
ftov_msgs: factor to variable messages
evidence: evidence (unary log potentials) for variables.
fg_state: associated factor graph state
Raises: ValueError if log_potentials, ftov_msgs or evidence are not derived
from the same Factor graph state.
"""
log_potentials: LogPotentials
ftov_msgs: FToVMessages
evidence: Evidence
def __post_init__(self):
if (self.log_potentials.fg_state != self.ftov_msgs.fg_state) or (
self.ftov_msgs.fg_state != self.evidence.fg_state
):
raise ValueError(
"log_potentials, ftov_msgs and evidence should be derived from the"
" same fg_state."
)
@property
def fg_state(self) -> fgraph.FactorGraphState:
return self.log_potentials.fg_state
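# A minimal construction sketch (illustrative; `fg` is an assumed FactorGraph):
#
#   bp_state = BPState(
#       log_potentials=LogPotentials(fg_state=fg.fg_state),
#       ftov_msgs=FToVMessages(fg_state=fg.fg_state),
#       evidence=Evidence(fg_state=fg.fg_state),
#   )
#
# This mirrors what the FactorGraph.bp_state property assembles.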
| PGMax-main | pgmax/infer/bp_state.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared context classes for the inference methods."""
import dataclasses
import functools
from typing import Any, Callable, Dict, Hashable, Optional, Sequence
import warnings
import jax
import jax.numpy as jnp
import numpy as np
from pgmax import factor
from pgmax import vgroup
from pgmax.factor import FAC_TO_VAR_UPDATES
from pgmax.infer import bp_state as bpstate
from pgmax.infer.bp_state import BPArrays
from pgmax.infer.bp_state import BPState
from pgmax.infer.bp_state import Evidence
from pgmax.infer.bp_state import FToVMessages
from pgmax.infer.bp_state import LogPotentials
@dataclasses.dataclass(frozen=True, eq=False)
class Inferer:
"""Inferer pure functions.
Attributes:
init: Function to create log_potentials, ftov_msgs and evidence.
update: Function to update log_potentials, ftov_msgs and evidence.
to_bp_state: Function to reconstruct the BPState from a BPArrays.
get_beliefs: Function to calculate beliefs from a BPArrays.
run: Function to run inference.
"""
init: Callable[..., BPArrays]
update: Callable[..., BPArrays]
to_bp_state: Callable[..., BPArrays]
get_beliefs: Callable[..., Dict[Hashable, Any]]
run: Callable[..., BPArrays]
@dataclasses.dataclass(frozen=True, eq=False)
class InfererContext:
"""Shared inference context for the different inferers.
Attributes:
bp_state: Belief propagation state.
"""
bp_state: BPState
def __post_init__(self):
if jax.lib.xla_bridge.get_backend().platform == "tpu": # pragma: no cover
warnings.warn(
"PGMax is not optimized for the TPU backend. Please consider using"
" GPUs!"
)
object.__setattr__(self, "wiring", self.bp_state.fg_state.wiring)
object.__setattr__(
self, "evidence_to_vars", self.bp_state.fg_state.evidence_to_vars
)
# Add offsets to the edges and factors indices of var_states_for_edges
var_states_for_edges = factor.concatenate_var_states_for_edges(
[
self.wiring[factor_type].var_states_for_edges
for factor_type in FAC_TO_VAR_UPDATES
]
)
object.__setattr__(
self, "var_states_for_edge_states", var_states_for_edges[..., 0]
)
object.__setattr__(
self, "edge_indices_for_edge_states", var_states_for_edges[..., 1]
)
object.__setattr__(
self, "factor_indices_for_edge_states", var_states_for_edges[..., 2]
)
# Useful static quantities
num_variables = int(self.bp_state.fg_state.evidence_to_vars[-1]) + 1
num_edges = int(var_states_for_edges[-1, 1]) + 1
num_factors = int(var_states_for_edges[-1, 2]) + 1
object.__setattr__(self, "num_variables", num_variables)
object.__setattr__(self, "num_edges", num_edges)
object.__setattr__(self, "num_factors", num_factors)
# Inference arguments per factor type
inference_arguments = {}
for factor_type in FAC_TO_VAR_UPDATES:
this_inference_arguments = self.wiring[
factor_type
].get_inference_arguments()
inference_arguments[factor_type] = this_inference_arguments
object.__setattr__(self, "inference_arguments", inference_arguments)
object.__setattr__(
self,
"factor_type_to_msgs_range",
self.bp_state.fg_state.factor_type_to_msgs_range,
)
object.__setattr__(
self,
"factor_type_to_potentials_range",
self.bp_state.fg_state.factor_type_to_potentials_range,
)
def update(
self,
bp_arrays: Optional[BPArrays] = None,
log_potentials_updates: Optional[Dict[Any, jnp.ndarray]] = None,
ftov_msgs_updates: Optional[Dict[Any, jnp.ndarray]] = None,
evidence_updates: Optional[Dict[Any, jnp.ndarray]] = None,
) -> BPArrays:
"""Returns a BPArrays with the updated log_potentials, ftov_msgs and evidence.
Args:
bp_arrays: Optional arrays of log_potentials, ftov_msgs, evidence.
log_potentials_updates: Optional dictionary containing log_potentials
updates.
ftov_msgs_updates: Optional dictionary containing ftov_msgs updates.
evidence_updates: Optional dictionary containing evidence updates.
"""
if bp_arrays is not None:
log_potentials = bp_arrays.log_potentials
evidence = bp_arrays.evidence
ftov_msgs = bp_arrays.ftov_msgs
else:
log_potentials = jax.device_put(self.bp_state.log_potentials.value)
ftov_msgs = self.bp_state.ftov_msgs.value
evidence = self.bp_state.evidence.value
if log_potentials_updates is not None:
log_potentials = bpstate.update_log_potentials(
log_potentials,
log_potentials_updates,
self.bp_state.fg_state,
)
if ftov_msgs_updates is not None:
ftov_msgs = bpstate.update_ftov_msgs(
ftov_msgs,
ftov_msgs_updates,
self.bp_state.fg_state,
)
if evidence_updates is not None:
evidence = bpstate.update_evidence(
evidence, evidence_updates, self.bp_state.fg_state
)
return BPArrays(
log_potentials=log_potentials,
ftov_msgs=ftov_msgs,
evidence=evidence,
)
def init(
self,
log_potentials_updates: Optional[Dict[Any, jnp.ndarray]] = None,
ftov_msgs_updates: Optional[Dict[Any, jnp.ndarray]] = None,
evidence_updates: Optional[Dict[Any, jnp.ndarray]] = None,
) -> BPArrays:
"""Returns a BPArrays with the initialized log_potentials, ftov_msgs and evidence.
Args:
log_potentials_updates: Optional dictionary containing log_potentials
updates.
ftov_msgs_updates: Optional dictionary containing ftov_msgs updates.
evidence_updates: Optional dictionary containing evidence updates.
"""
return self.update(
bp_arrays=None,
log_potentials_updates=log_potentials_updates,
ftov_msgs_updates=ftov_msgs_updates,
evidence_updates=evidence_updates,
)
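# A usage sketch (illustrative; `bp_state`, `variables` and `evidence_array`
# are assumptions): with `ctx = InfererContext(bp_state)`, fresh arrays for a
# run can be obtained as
#
#   bp_arrays = ctx.init(evidence_updates={variables: evidence_array})
#
# where `variables` is one of the FactorGraph's VarGroups and `evidence_array`
# has the shape expected by that VarGroup's flatten method.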
def to_bp_state(self, bp_arrays: BPArrays) -> BPState:
"""Returns a BPState reconstructed from a BPArrays.
Args:
bp_arrays: A BPArrays containing log_potentials, ftov_msgs, evidence.
"""
return BPState(
log_potentials=LogPotentials(
fg_state=self.bp_state.fg_state, value=bp_arrays.log_potentials
),
ftov_msgs=FToVMessages(
fg_state=self.bp_state.fg_state,
value=bp_arrays.ftov_msgs,
),
evidence=Evidence(
fg_state=self.bp_state.fg_state,
value=bp_arrays.evidence,
),
)
@functools.partial(jax.jit, static_argnames="self")
def get_beliefs(self, bp_arrays: BPArrays) -> Dict[Hashable, Any]:
"""Returns the beliefs derived from a BPArrays.
Args:
bp_arrays: A BPArrays containing log_potentials, ftov_msgs, evidence.
"""
flat_beliefs = (
jax.device_put(bp_arrays.evidence)
.at[jax.device_put(self.var_states_for_edge_states)]
.add(bp_arrays.ftov_msgs)
)
return unflatten_beliefs(
flat_beliefs, self.bp_state.fg_state.variable_groups
)
def unflatten_beliefs(
flat_beliefs: jnp.array, variable_groups: Sequence[vgroup.VarGroup]
) -> Dict[Hashable, Any]:
"""Returns unflattened beliefs from flat beliefs.
Args:
flat_beliefs: Flattened array of beliefs
variable_groups: All the variable groups in the FactorGraph.
"""
beliefs = {}
start = 0
for variable_group in variable_groups:
num_states = variable_group.num_states
assert isinstance(num_states, np.ndarray)
length = num_states.sum()
beliefs[variable_group] = variable_group.unflatten(
flat_beliefs[start : start + length], True
)
start += length
return beliefs
@jax.jit
def decode_map_states(beliefs: Dict[Hashable, Any]) -> Dict[Hashable, Any]:
"""Returns the MAP states of several VarGroups given their beliefs.
Args:
beliefs: A dictionary containing the beliefs of the VarGroups.
"""
# pylint: disable=g-long-lambda
return jax.tree_util.tree_map(
lambda x: jnp.argmax(x, axis=-1)
if x.size > 0 # Deal with MAP state of zero-sized array
else jnp.zeros(x.shape[:-1]),
beliefs,
)
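# A small illustration (hypothetical VarGroup `variables` and shapes): for
# beliefs of shape (3, 5, 2), decode_map_states takes the argmax over the
# trailing state dimension and returns arrays of shape (3, 5):
#
#   beliefs = {variables: jnp.zeros((3, 5, 2))}
#   map_states = decode_map_states(beliefs)  # map_states[variables].shape == (3, 5)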
| PGMax-main | pgmax/infer/inferer.py |
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module containing the core class to build a factor graph."""
import collections
import copy
import dataclasses
import functools
import types
from typing import Any, Dict, FrozenSet, List, Mapping, OrderedDict, Sequence, Set, Tuple, Type, Union
import jax.numpy as jnp
import numpy as np
from pgmax import factor
from pgmax import fgroup
from pgmax import vgroup
from pgmax.factor import FAC_TO_VAR_UPDATES
from pgmax.utils import cached_property
@dataclasses.dataclass(frozen=True, eq=False)
class FactorGraphState:
"""FactorGraphState.
Attributes:
variable_groups: VarGroups in the FactorGraph.
vars_to_starts: Maps variables to their starting indices in the flat
evidence array. flat_evidence[vars_to_starts[variable]:
vars_to_starts[variable] + variable.num_var_states] contains the evidence for
the variable.
num_var_states: Total number of variable states.
total_factor_num_states: Size of the flat ftov messages array.
factor_type_to_msgs_range: Maps factor types to their start and end indices
in the flat ftov messages.
factor_type_to_potentials_range: Maps factor types to their start and end
indices in the flat log potentials.
factor_group_to_potentials_starts: Maps factor groups to their starting
indices in the flat log potentials.
log_potentials: Flat log potentials array concatenated for each factor type.
evidence_to_vars: Maps the evidence entries to their variable indices
wiring: Wiring derived for each factor type.
"""
variable_groups: Sequence[vgroup.VarGroup]
vars_to_starts: Mapping[Tuple[int, int], int]
num_var_states: int
total_factor_num_states: int
factor_type_to_msgs_range: OrderedDict[Type[factor.Factor], Tuple[int, int]]
factor_type_to_potentials_range: OrderedDict[
Type[factor.Factor], Tuple[int, int]
]
factor_group_to_potentials_starts: OrderedDict[fgroup.FactorGroup, int]
log_potentials: np.ndarray
evidence_to_vars: np.ndarray
wiring: OrderedDict[Type[factor.Factor], factor.Wiring]
def __post_init__(self):
for field in self.__dataclass_fields__:
if isinstance(getattr(self, field), np.ndarray):
getattr(self, field).flags.writeable = False
if isinstance(getattr(self, field), Mapping):
object.__setattr__(
self, field, types.MappingProxyType(getattr(self, field))
)
@dataclasses.dataclass
class FactorGraph:
"""Class for representing a factor graph.
Factors in a graph are clustered in factor groups,
which are grouped according to their factor types.
Attributes:
variable_groups: A single VarGroup or a list of VarGroups.
"""
variable_groups: Union[vgroup.VarGroup, Sequence[vgroup.VarGroup]]
def __post_init__(self):
if isinstance(self.variable_groups, vgroup.VarGroup):
self.variable_groups = [self.variable_groups]
# Useful objects to build the FactorGraph
self._factor_types_to_groups: OrderedDict[
Type[factor.Factor], List[fgroup.FactorGroup]
] = collections.OrderedDict(
[(factor_type, []) for factor_type in FAC_TO_VAR_UPDATES]
)
self._factor_types_to_variables_for_factors: OrderedDict[
Type[factor.Factor], Set[FrozenSet[Any]]
] = collections.OrderedDict(
[(factor_type, set()) for factor_type in FAC_TO_VAR_UPDATES]
)
# See FactorGraphState docstrings for documentation on the following fields
self._vars_to_starts: Dict[Tuple[int, int], int] = {}
vars_num_states_cumsum = 0
for variable_group in self.variable_groups:
vg_num_states = variable_group.num_states.flatten()
vg_num_states_cumsum = np.insert(np.cumsum(vg_num_states), 0, 0)
self._vars_to_starts.update(
zip(
variable_group.variables,
vars_num_states_cumsum + vg_num_states_cumsum[:-1],
)
)
vars_num_states_cumsum += vg_num_states_cumsum[-1]
self._num_var_states = vars_num_states_cumsum
# pylint: disable=g-complex-comprehension
def __hash__(self) -> int:
all_factor_groups = tuple(
[
factor_group
for factor_groups_per_type in self._factor_types_to_groups.values()
for factor_group in factor_groups_per_type
]
)
return hash(all_factor_groups)
def add_factors(
self,
factors: Union[
factor.Factor,
fgroup.FactorGroup,
Sequence[Union[factor.Factor, fgroup.FactorGroup]],
],
) -> None:
"""Add a single Factor / FactorGroup or a list of Factor / FactorGroup to the FactorGraph.
Args:
factors: The Factor, FactorGroup or list of Factors and FactorGroups to be
added to the FactorGraph.
Raises:
ValueError: If FactorGroup involving the same variables already exists.
"""
if isinstance(factors, list):
for this_factor in factors:
self.add_factors(this_factor)
return None
if isinstance(factors, fgroup.FactorGroup):
factor_group = factors
elif isinstance(factors, factor.Factor):
factor_group = fgroup.SingleFactorGroup(
variables_for_factors=[factors.variables],
single_factor=factors,
)
factor_type = factor_group.factor_type
for var_names_for_factor in factor_group.variables_for_factors:
var_names = frozenset(var_names_for_factor)
if len(var_names_for_factor) != len(var_names):
raise ValueError(
f"A Factor of type {factor_type} involving variables"
f" {var_names_for_factor} contains duplicate variables."
)
if var_names in self._factor_types_to_variables_for_factors[factor_type]:
raise ValueError(
f"A Factor of type {factor_type} involving variables"
f" {var_names} already exists. Please merge the corresponding"
" factors."
)
self._factor_types_to_variables_for_factors[factor_type].add(var_names)
self._factor_types_to_groups[factor_type].append(factor_group)
@functools.lru_cache()
def compute_offsets(self) -> None:
"""Compute factor message offsets for the factor types and factor groups in the flattened array of messages.
Also compute log potentials offsets for factor groups.
If offsets have already been compiled, do nothing.
"""
# Message offsets for ftov messages
self._factor_type_to_msgs_range = collections.OrderedDict()
self._factor_group_to_msgs_starts = collections.OrderedDict()
factor_num_states_cumsum = 0
# Log potentials offsets
self._factor_type_to_potentials_range = collections.OrderedDict()
self._factor_group_to_potentials_starts = collections.OrderedDict()
factor_num_configs_cumsum = 0
for (
factor_type,
factors_groups_by_type,
) in self._factor_types_to_groups.items():
factor_type_num_states_start = factor_num_states_cumsum
factor_type_num_configs_start = factor_num_configs_cumsum
for factor_group in factors_groups_by_type:
self._factor_group_to_msgs_starts[factor_group] = (
factor_num_states_cumsum
)
self._factor_group_to_potentials_starts[factor_group] = (
factor_num_configs_cumsum
)
factor_num_states_cumsum += factor_group.factor_edges_num_states.sum()
factor_num_configs_cumsum += (
factor_group.factor_group_log_potentials.shape[0]
)
self._factor_type_to_msgs_range[factor_type] = (
factor_type_num_states_start,
factor_num_states_cumsum,
)
self._factor_type_to_potentials_range[factor_type] = (
factor_type_num_configs_start,
factor_num_configs_cumsum,
)
self._total_factor_num_states = factor_num_states_cumsum
self._total_factor_num_configs = factor_num_configs_cumsum
@cached_property
def wiring(self) -> OrderedDict[Type[factor.Factor], factor.Wiring]:
"""Function to compile wiring for belief propagation.
If wiring has already been compiled, do nothing.
Returns:
A dictionary mapping each factor type to its wiring.
"""
wiring = collections.OrderedDict()
for factor_type in self._factor_types_to_groups:
wiring[factor_type] = [
factor_group.compile_wiring(self._vars_to_starts)
for factor_group in self._factor_types_to_groups[factor_type]
]
wiring = collections.OrderedDict(
[
(factor_type, factor_type.concatenate_wirings(wiring[factor_type]))
for factor_type in wiring
]
)
return wiring
@cached_property
def log_potentials(self) -> OrderedDict[Type[factor.Factor], np.ndarray]:
"""Function to compile potential array for belief propagation.
If potential array has already been compiled, do nothing.
Returns:
A dictionary mapping each factor type to the array of the log of the
potential function for each valid configuration.
"""
log_potentials = collections.OrderedDict()
for (
factor_type,
factors_groups_by_type,
) in self._factor_types_to_groups.items():
if not factors_groups_by_type:
log_potentials[factor_type] = jnp.empty((0,))
else:
log_potentials[factor_type] = jnp.concatenate(
[
factor_group.factor_group_log_potentials
for factor_group in factors_groups_by_type
]
)
return log_potentials
@cached_property
def factors(
self,
) -> OrderedDict[Type[factor.Factor], Tuple[factor.Factor, ...]]:
"""Mapping factor type to individual factors in the factor graph.
This function is only called on demand when the user requires it.
Returns:
The list of factors in the FactorGraph
"""
print(
"Factors have not been added to the factor graph yet, this may take a"
" while..."
)
factors = collections.OrderedDict()
for factor_type in self._factor_types_to_groups:
factors_by_type = []
for factor_group in self._factor_types_to_groups[factor_type]:
for this_factor in factor_group.factors:
factors_by_type.append(this_factor)
factors[factor_type] = tuple(factors_by_type)
return factors
@property
def factor_groups(
self,
) -> OrderedDict[Type[factor.Factor], List[fgroup.FactorGroup]]:
"""Mapping from factor type to the list of factor groups in the factor graph."""
return self._factor_types_to_groups
@property
def evidence_to_vars(self) -> np.ndarray:
"""Returns the variable index associated with each evidence entry."""
var_indices = list(self._vars_to_starts.values()) + [self._num_var_states]
evidence_to_vars = np.zeros((self._num_var_states,), dtype=int)
for idx_var in range(len(var_indices) - 1):
var_start, var_stop = var_indices[idx_var], var_indices[idx_var + 1]
evidence_to_vars[var_start:var_stop] = idx_var
return evidence_to_vars
@cached_property
def fg_state(self) -> FactorGraphState:
"""Current factor graph state given the added factors."""
# Preliminary computations
self.compute_offsets()
log_potentials = jnp.concatenate(
[
self.log_potentials[factor_type]
for factor_type in self.log_potentials.keys()
]
)
assert isinstance(self.variable_groups, list)
return FactorGraphState(
variable_groups=self.variable_groups,
vars_to_starts=self._vars_to_starts,
num_var_states=self._num_var_states,
total_factor_num_states=self._total_factor_num_states,
factor_type_to_msgs_range=copy.copy(self._factor_type_to_msgs_range),
factor_type_to_potentials_range=copy.copy(
self._factor_type_to_potentials_range
),
factor_group_to_potentials_starts=copy.copy(
self._factor_group_to_potentials_starts
),
log_potentials=log_potentials,
evidence_to_vars=self.evidence_to_vars,
wiring=self.wiring,
)
@property
def bp_state(self) -> Any:
"""Relevant information for doing belief propagation."""
# Preliminary computations
self.compute_offsets()
# pylint: disable=g-import-not-at-top
from pgmax.infer import bp_state
return bp_state.BPState(
log_potentials=bp_state.LogPotentials(fg_state=self.fg_state),
ftov_msgs=bp_state.FToVMessages(fg_state=self.fg_state),
evidence=bp_state.Evidence(fg_state=self.fg_state),
)
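# A minimal end-to-end sketch (illustrative; assumes vgroup.NDVarArray exposes
# `num_states` and `shape` arguments, and sizes/names are made up):
#
#   variables = vgroup.NDVarArray(num_states=2, shape=(10,))
#   fg = FactorGraph(variable_groups=variables)
#   fg.add_factors(
#       fgroup.PairwiseFactorGroup(
#           variables_for_factors=[
#               [variables[idx], variables[idx + 1]] for idx in range(9)
#           ],
#           log_potential_matrix=np.zeros((2, 2)),
#       )
#   )
#   bp_state = fg.bp_state  # ready to be passed to the inference routines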
| PGMax-main | pgmax/fgraph/fgraph.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sub-package containing functions to represent a factor graph."""
from pgmax.fgraph.fgraph import FactorGraph
from pgmax.fgraph.fgraph import FactorGraphState
| PGMax-main | pgmax/fgraph/__init__.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines LogicalFactorGroup and its two children, ORFactorGroup and ANDFactorGroup.
"""
import collections
import dataclasses
from typing import Any, FrozenSet, Optional, OrderedDict, Type
import numpy as np
from pgmax import factor
from pgmax.factor import logical
from pgmax.fgroup import fgroup
@dataclasses.dataclass(frozen=True, eq=False)
class LogicalFactorGroup(fgroup.FactorGroup):
"""Class to represent a group of LogicalFactors.
All factors in the group are assumed to have the same edge_states_offset.
Consequently, the factors are all ORFactors or ANDFactors.
Attributes:
edge_states_offset: Offset to go from a variable's relevant state to its
other state. For ORFactors the edge_states_offset is 1. For ANDFactors the
edge_states_offset is -1.
"""
factor_configs: Optional[np.ndarray] = dataclasses.field(
init=False,
default=None,
)
edge_states_offset: int = dataclasses.field(init=False)
# pylint: disable=g-complex-comprehension
def _get_variables_to_factors(
self,) -> OrderedDict[FrozenSet[Any], logical.LogicalFactor]:
"""Function that generates a dictionary mapping set of connected variables to factors.
This function is only called on demand when the user requires it.
Returns:
A dictionary mapping all possible sets of connected variables to different
factors.
"""
variables_to_factors = collections.OrderedDict([
(frozenset(variables_for_factor),
self.factor_type(variables=variables_for_factor))
for variables_for_factor in self.variables_for_factors
])
return variables_to_factors
@dataclasses.dataclass(frozen=True, eq=False)
class ORFactorGroup(LogicalFactorGroup):
"""Class to represent a group of ORFactors.
Attributes:
edge_states_offset: Offset to go from a variable's relevant state to its
other state. For ORFactors the edge_states_offset is 1.
"""
edge_states_offset: int = dataclasses.field(init=False, default=1)
factor_type: Type[factor.Factor] = dataclasses.field(
init=False,
default=logical.ORFactor,
)
@dataclasses.dataclass(frozen=True, eq=False)
class ANDFactorGroup(LogicalFactorGroup):
"""Class to represent a group of ANDFactors.
Attributes:
edge_states_offset: Offset to go from a variable's relevant state to its
other state. For ANDFactors the edge_states_offset is -1.
"""
edge_states_offset: int = dataclasses.field(init=False, default=-1)
factor_type: Type[factor.Factor] = dataclasses.field(
init=False,
default=logical.ANDFactor,
)
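# A minimal usage sketch (illustrative; `parents`, `children` and `fg` are
# assumed VarGroups / FactorGraph, and the last variable of each list is
# assumed here to play the role of the OR output):
#
#   or_factors = ORFactorGroup(
#       variables_for_factors=[
#           [parents[idx, 0], parents[idx, 1], children[idx]]
#           for idx in range(10)
#       ]
#   )
#   fg.add_factors(or_factors)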
| PGMax-main | pgmax/fgroup/logical.py |
# Copyright 2022 Intrinsic Innovation LLC.
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sub-package defining different types of groups of factors."""
from pgmax.fgroup.enum import EnumFactorGroup
from pgmax.fgroup.enum import PairwiseFactorGroup
from pgmax.fgroup.fgroup import FactorGroup
from pgmax.fgroup.fgroup import SingleFactorGroup
from pgmax.fgroup.logical import ANDFactorGroup
from pgmax.fgroup.logical import LogicalFactorGroup
from pgmax.fgroup.logical import ORFactorGroup
from pgmax.fgroup.pool import PoolFactorGroup
| PGMax-main | pgmax/fgroup/__init__.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module containing the base classes for factor groups in a factor graph."""
import dataclasses
import functools
import inspect
from typing import Any, FrozenSet, List, Mapping, OrderedDict, Sequence, Tuple, Type, Union
import jax.numpy as jnp
import numpy as np
from pgmax import factor
from pgmax.utils import cached_property
@functools.total_ordering
@dataclasses.dataclass(frozen=True, eq=False)
class FactorGroup:
"""Class to represent a group of Factors.
Attributes:
variables_for_factors: A list of list of variables. Each list within the
outer list contains the variables connected to a Factor. The same variable
can be connected to multiple Factors.
factor_configs: Optional array containing an explicit enumeration of all
valid configurations
log_potentials: Array of log potentials.
factor_type: Factor type shared by all the Factors in the FactorGroup.
Raises:
ValueError: if the FactorGroup does not contain a Factor
"""
variables_for_factors: Sequence[List[Tuple[int, int]]]
factor_configs: np.ndarray = dataclasses.field(init=False)
log_potentials: np.ndarray = dataclasses.field(
init=False,
default_factory=lambda: np.empty((0,)),
)
factor_type: Type[Any] = dataclasses.field(init=False)
def __post_init__(self):
if not self.variables_for_factors:
raise ValueError("Cannot create a FactorGroup with no Factor.")
def __hash__(self):
return id(self)
def __eq__(self, other):
return hash(self) == hash(other)
def __lt__(self, other):
return hash(self) < hash(other)
def __getitem__(self, variables: Sequence[Tuple[int, int]]) -> Any:
"""Function to query individual factors in the factor group.
Args:
variables: a set of variables, used to query an individual factor in the
factor group involving this set of variables
Returns:
A queried individual factor
Raises:
ValueError: if the queried factor is not present in the factor group
"""
variables = frozenset(variables)
if variables not in self._variables_to_factors:
raise ValueError(
f"The queried factor connected to the set of variables {variables} is"
" not present in the factor group."
)
return self._variables_to_factors[variables]
@cached_property
def factor_sizes(self) -> np.ndarray:
"""Computes the factor sizes.
Returns:
Array of the different factor sizes.
"""
return np.array(
[
len(variables_for_factor)
for variables_for_factor in self.variables_for_factors
]
)
@cached_property
def factor_edges_num_states(self) -> np.ndarray:
"""Computes an array concatenating the number of states for the variables connected to each Factor of the FactorGroup.
Each variable will appear once for each Factor it connects to.
Returns:
Array with the number of states for the variables connected to
each Factor.
"""
factor_edges_num_states = np.empty(
shape=(self.factor_sizes.sum(),),
dtype=int,
)
idx = 0
for variables_for_factor in self.variables_for_factors:
for variable in variables_for_factor:
factor_edges_num_states[idx] = variable[1]
idx += 1
return factor_edges_num_states
@cached_property
def _variables_to_factors(
self,
) -> Mapping[FrozenSet[Sequence[List[Tuple[int, int]]]], factor.Factor]:
"""Function that maps each set of connected variables to its factor.
This function is only called on demand when the user requires it.
Returns:
A dictionary mapping sets of connected variables to the factors
"""
return self._get_variables_to_factors()
@cached_property
def factor_group_log_potentials(self) -> np.ndarray:
"""Flattened array of log potentials."""
return self.log_potentials.flatten()
@cached_property
def factors(self) -> Tuple[factor.Factor, ...]:
"""Returns all factors in the factor group.
This function is only called on demand when the user requires it.
"""
return tuple(self._variables_to_factors.values())
@cached_property
def num_factors(self) -> int:
"""Returns the number of factors in the FactorGroup."""
return len(self.variables_for_factors)
def _get_variables_to_factors(
self,
) -> OrderedDict[FrozenSet[Sequence[List[Tuple[int, int]]]], Any]:
"""Function that generates a dictionary mapping names to factors.
This function is only called on demand when the user requires it.
Returns:
A dictionary mapping all possible names to different factors.
"""
raise NotImplementedError(
"Please subclass the FactorGroup class and override this method"
)
def flatten(self, data: Union[np.ndarray, jnp.ndarray]) -> jnp.ndarray:
"""Function that turns meaningful structured data into a flat data array for internal use.
Args:
data: Meaningful structured data.
Returns:
A flat jnp.array for internal use
"""
raise NotImplementedError(
"Please subclass the FactorGroup class and override this method"
)
def unflatten(self, flat_data: Union[np.ndarray, jnp.ndarray]) -> Any:
"""Function that recovers meaningful structured data from internal flat data array.
Args:
flat_data: Internal flat data array.
Returns:
Meaningful structured data.
"""
raise NotImplementedError(
"Please subclass the FactorGroup class and override this method"
)
def compile_wiring(
self,
vars_to_starts: Mapping[Tuple[int, int], int],
) -> Any:
"""Compile an efficient wiring for the FactorGroup.
Args:
vars_to_starts: A dictionary that maps variables to their global starting
indices. For an n-state variable, a global start index of m means the
global indices of its n variable states are m, m + 1, ..., m + n - 1.
Returns:
Wiring for the FactorGroup
"""
compile_wiring_arguments = inspect.getfullargspec(
self.factor_type.compile_wiring
).args
compile_wiring_arguments.remove("vars_to_starts")
compile_wiring_arguments = {
key: getattr(self, key) for key in compile_wiring_arguments
}
wiring = self.factor_type.compile_wiring(
vars_to_starts=vars_to_starts, **compile_wiring_arguments
)
return wiring
@dataclasses.dataclass(frozen=True, eq=False)
class SingleFactorGroup(FactorGroup):
"""Class to represent a FactorGroup with a single factor.
For internal use only. Should not be directly used to add FactorGroups to a
factor graph.
Attributes:
single_factor: the single factor in the SingleFactorGroup
"""
single_factor: factor.Factor
def __post_init__(self):
super().__post_init__()
if len(self.variables_for_factors) != 1:
raise ValueError(
"SingleFactorGroup should only contain one factor. Got"
f" {len(self.variables_for_factors)}"
)
object.__setattr__(self, "factor_type", type(self.single_factor))
compile_wiring_arguments = inspect.getfullargspec(
self.factor_type.compile_wiring
).args
compile_wiring_arguments.remove("vars_to_starts")
for key in compile_wiring_arguments:
if not hasattr(self, key):
object.__setattr__(self, key, getattr(self.single_factor, key))
single_factor_log_potentials = getattr(self.single_factor, "log_potentials")
if single_factor_log_potentials.shape[0] > 0:
# Fgroup log potentials is of shape (num_factors, num_configs)
single_factor_log_potentials = single_factor_log_potentials[None]
object.__setattr__(self, "log_potentials", single_factor_log_potentials)
def _get_variables_to_factors(
self,
) -> OrderedDict[FrozenSet[Sequence[List[Tuple[int, int]]]], factor.Factor]:
"""Function that generates a dictionary mapping names to factors.
Returns:
A dictionary mapping all possible names to different factors.
"""
return OrderedDict(
[(frozenset(self.variables_for_factors[0]), self.single_factor)]
)
def flatten(self, data: Union[np.ndarray, jnp.ndarray]) -> jnp.ndarray:
"""Function that turns meaningful structured data into a flat data array for internal use.
Args:
data: Meaningful structured data.
Returns:
A flat jnp.array for internal use
"""
raise NotImplementedError(
"SingleFactorGroup does not support vectorized factor operations."
)
def unflatten(self, flat_data: Union[np.ndarray, jnp.ndarray]) -> Any:
"""Function that recovers meaningful structured data from internal flat data array.
Args:
flat_data: Internal flat data array.
Returns:
Meaningful structured data.
"""
raise NotImplementedError(
"SingleFactorGroup does not support vectorized factor operations."
)
| PGMax-main | pgmax/fgroup/fgroup.py |
# Copyright 2022 Intrinsic Innovation LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines EnumFactorGroup and PairwiseFactorGroup."""
import collections
import dataclasses
from typing import Any, FrozenSet, OrderedDict, Optional, Type, Union
import jax
import jax.numpy as jnp
import numpy as np
from pgmax import factor
from pgmax.factor import enum
from pgmax.fgroup import fgroup
@dataclasses.dataclass(frozen=True, eq=False)
class EnumFactorGroup(fgroup.FactorGroup):
"""Class to represent a group of EnumFactors.
All factors in the group are assumed to have the same set of valid
configurations.
The associated log potentials can however be different across factors.
Attributes:
factor_configs: Array of shape (num_val_configs, num_variables) containing
explicit enumeration of all valid configurations
log_potentials: Optional 1D array of shape (num_val_configs,) or 2D array of
shape (num_factors, num_val_configs).
If 1D, the log potentials are copied for each factor of the group.
If 2D, it specifies the log potentials of each factor.
If None, the log potentials are initialized to uniform 0.
factor_type: Factor type shared by all the Factors in the FactorGroup.
Raises:
ValueError if:
(1) The specified log_potentials is not of the expected shape.
(2) The dtype of the potential array is not float
"""
factor_configs: np.ndarray
log_potentials: Optional[Union[np.ndarray, jnp.ndarray]] = (
None #: :meta private:
)
factor_type: Type[factor.Factor] = dataclasses.field(
init=False,
default=enum.EnumFactor,
) #: :meta private:
def __post_init__(self):
super().__post_init__()
num_val_configs = self.factor_configs.shape[0]
if self.log_potentials is None:
log_potentials = np.zeros(
(self.num_factors, num_val_configs),
dtype=float,
)
else:
if self.log_potentials.shape != (
num_val_configs,
) and self.log_potentials.shape != (self.num_factors, num_val_configs):
raise ValueError(
f"Expected log potentials shape: {(num_val_configs,)} or"
f" {(self.num_factors, num_val_configs)}. Got"
f" {self.log_potentials.shape}."
)
if isinstance(self.log_potentials, jnp.ndarray):
log_potentials = jnp.broadcast_to(
self.log_potentials,
(self.num_factors, num_val_configs),
)
else:
log_potentials = np.broadcast_to(
self.log_potentials,
(self.num_factors, num_val_configs),
)
if not np.issubdtype(log_potentials.dtype, np.floating):
raise ValueError(
f"Potentials should be floats. Got {log_potentials.dtype}."
)
object.__setattr__(self, "log_potentials", log_potentials)
# pylint: disable=g-complex-comprehension
def _get_variables_to_factors(
self,
) -> OrderedDict[FrozenSet[Any], enum.EnumFactor]:
"""Function that generates a dictionary mapping set of connected variables to factors.
This function is only called on demand when the user requires it.
Returns:
A dictionary mapping all possible sets of connected variables to different
factors.
"""
np_array_log_potentials = np.array(self.log_potentials)
variables_to_factors = collections.OrderedDict(
[
(
frozenset(variables_for_factor),
enum.EnumFactor(
variables=variables_for_factor,
factor_configs=self.factor_configs,
log_potentials=np_array_log_potentials[ii],
),
)
for ii, variables_for_factor in enumerate(
self.variables_for_factors
)
]
)
return variables_to_factors
def flatten(self, data: Union[np.ndarray, jnp.ndarray]) -> jnp.ndarray:
"""Function that turns meaningful structured data into a flat data array for internal use.
Args:
data: Meaningful structured data. Array of shape (num_val_configs,) (for
shared log potentials) or (num_factors, num_val_configs) (for log
potentials) or (num_factors, num_edge_states) (for ftov messages).
Returns:
A flat jnp.array for internal use
Raises:
ValueError: if data is not of the right shape.
"""
num_factors = len(self.factors)
factor_edges_num_states = sum(
[variable[1] for variable in self.variables_for_factors[0]]
)
if (
data.shape != (num_factors, self.factor_configs.shape[0])
and data.shape != (num_factors, factor_edges_num_states)
and data.shape != (self.factor_configs.shape[0],)
):
raise ValueError(
"data should be of shape"
f" {(num_factors, self.factor_configs.shape[0])} or"
f" {(num_factors, factor_edges_num_states)} or"
f" {(self.factor_configs.shape[0],)}. Got {data.shape}."
)
if data.shape == (self.factor_configs.shape[0],):
flat_data = jnp.tile(data, num_factors)
else:
flat_data = jax.device_put(data).flatten()
return flat_data
def unflatten(
self,
flat_data: Union[np.ndarray, jnp.ndarray],
) -> Union[np.ndarray, jnp.ndarray]:
"""Function that recovers meaningful structured data from internal flat data array.
Args:
flat_data: Internal flat data array.
Returns:
Meaningful structured data.
Array of shape (num_val_configs,) (for shared log potentials)
or (num_factors, num_val_configs) (for log potentials)
or (num_factors, num_edge_states) (for ftov messages).
Raises:
ValueError if:
(1) flat_data is not a 1D array
(2) flat_data is not of the right shape
"""
if flat_data.ndim != 1:
raise ValueError(
f"Can only unflatten 1D array. Got a {flat_data.ndim}D array."
)
num_factors = len(self.factors)
factor_edges_num_states = sum(
[variable[1] for variable in self.variables_for_factors[0]]
)
if flat_data.size == num_factors * self.factor_configs.shape[0]:
data = flat_data.reshape(
(num_factors, self.factor_configs.shape[0]),
)
elif flat_data.size == num_factors * np.sum(factor_edges_num_states):
data = flat_data.reshape((num_factors, np.sum(factor_edges_num_states)))
else:
raise ValueError(
"flat_data should be compatible with shape"
f" {(num_factors, self.factor_configs.shape[0])} or"
f" {(num_factors, np.sum(factor_edges_num_states))}. Got"
f" {flat_data.shape}."
)
return data
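# A minimal usage sketch (illustrative; `left` and `right` are assumed binary
# VarGroups): an EnumFactorGroup enforcing equality between pairs of binary
# variables, with one shared log potential per valid configuration.
#
#   factor_configs = np.array([[0, 0], [1, 1]])  # the two "equal" configurations
#   equality_factors = EnumFactorGroup(
#       variables_for_factors=[[left[idx], right[idx]] for idx in range(8)],
#       factor_configs=factor_configs,
#       log_potentials=np.zeros(2),  # shared across the 8 factors
#   )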
@dataclasses.dataclass(frozen=True, eq=False)
class PairwiseFactorGroup(fgroup.FactorGroup):
"""Class to represent a group of EnumFactors where each factor connects to two different variables.
All factors in the group are assumed to be such that all possible
configurations of the two variables are valid.
The associated log potentials can however be different across factors.
Attributes:
log_potential_matrix: Optional 2D array of shape (num_states1, num_states2)
or 3D array of shape (num_factors, num_states1, num_states2) where
num_states1 and num_states2 are the number of states of the first and
second variables involved in each factor.
If 2D, the log potentials are copied for each factor of the group.
If 3D, it specifies the log potentials of each factor.
If None, the log potentials are initialized to uniform 0.
factor_type: Factor type shared by all the Factors in the FactorGroup.
Raises:
ValueError if:
(1) The specified log_potential_matrix is not a 2D or 3D array.
(2) The dtype of the potential array is not float
(3) Some pairwise factors connect to less or more than 2 variables.
(4) The specified log_potential_matrix does not match the number of
factors.
(5) The specified log_potential_matrix does not match the number of
variable states of the
variables in the factors.
"""
log_potential_matrix: Optional[Union[np.ndarray, jnp.ndarray]] = (
None #: :meta private:
)
factor_type: Type[factor.Factor] = dataclasses.field(
init=False,
default=enum.EnumFactor,
) #: :meta private:
def __post_init__(self):
super().__post_init__()
if self.log_potential_matrix is None:
log_potential_matrix = np.zeros((
self.variables_for_factors[0][0][1],
self.variables_for_factors[0][1][1],
))
else:
log_potential_matrix = self.log_potential_matrix
if not (log_potential_matrix.ndim == 2 or log_potential_matrix.ndim == 3):
raise ValueError(
"log_potential_matrix should be either a 2D array, specifying shared"
" parameters for all pairwise factors, or 3D array, specifying"
" parameters for individual pairwise factors. Got a"
f" {log_potential_matrix.ndim}D log_potential_matrix array."
)
if not np.issubdtype(log_potential_matrix.dtype, np.floating):
raise ValueError(
"Potential matrix should be floats. Got"
f" {self.log_potential_matrix.dtype}."
)
if log_potential_matrix.ndim == 3 and log_potential_matrix.shape[0] != len(
self.variables_for_factors
):
raise ValueError(
"Expected log_potential_matrix for"
f" {len(self.variables_for_factors)} factors. Got"
f" log_potential_matrix for {log_potential_matrix.shape[0]} factors."
)
log_potential_shape = log_potential_matrix.shape[-2:]
for variables_for_factor in self.variables_for_factors:
if len(variables_for_factor) != 2:
raise ValueError(
"All pairwise factors should connect to exactly 2 variables. Got a"
f" factor connecting to {len(variables_for_factor)} variables"
f" ({variables_for_factor})."
)
factor_num_configs = (
variables_for_factor[0][1],
variables_for_factor[1][1],
)
if log_potential_shape != factor_num_configs:
raise ValueError(
f"The specified pairwise factor {variables_for_factor} (with"
f" {factor_num_configs} configurations) does not match the specified"
" log_potential_matrix (with"
f" {log_potential_shape} configurations)."
)
object.__setattr__(self, "log_potential_matrix", log_potential_matrix)
factor_configs = (
np.mgrid[
: log_potential_matrix.shape[-2], : log_potential_matrix.shape[-1]
]
.transpose((1, 2, 0))
.reshape((-1, 2))
)
object.__setattr__(self, "factor_configs", factor_configs)
if isinstance(log_potential_matrix, jnp.ndarray):
log_potential_matrix = jnp.broadcast_to(
log_potential_matrix,
(len(self.variables_for_factors),) + log_potential_matrix.shape[-2:],
)
else:
log_potential_matrix = np.broadcast_to(
log_potential_matrix,
(len(self.variables_for_factors),) + log_potential_matrix.shape[-2:],
)
log_potentials = log_potential_matrix.reshape(
log_potential_matrix.shape[0], -1
)
object.__setattr__(self, "log_potentials", log_potentials)
# pylint: disable=g-complex-comprehension
def _get_variables_to_factors(
self,
) -> OrderedDict[FrozenSet[Any], enum.EnumFactor]:
"""Function that generates a dictionary mapping set of connected variables to factors.
This function is only called on demand when the user requires it.
Returns:
A dictionary mapping all possible sets of connected variables to factors.
"""
variables_to_factors = collections.OrderedDict(
[
(
frozenset(variable_for_factor),
enum.EnumFactor(
variables=variable_for_factor,
factor_configs=self.factor_configs,
log_potentials=self.log_potentials[ii],
),
)
for ii, variable_for_factor in enumerate(self.variables_for_factors)
]
)
return variables_to_factors
def flatten(self, data: Union[np.ndarray, jnp.ndarray]) -> jnp.ndarray:
"""Function that turns meaningful structured data into a flat data array for internal use.
Args:
data: Meaningful structured data. Should be an array of shape
(num_factors, var0_num_states, var1_num_states) (for log potential
matrices) or (num_factors, var0_num_states + var1_num_states) (for ftov
messages) or (var0_num_states, var1_num_states) (for shared log
potential matrix).
Returns:
A flat jnp.array for internal use
"""
assert isinstance(self.log_potential_matrix, np.ndarray)
num_factors = len(self.factors)
if (
data.shape != (num_factors,) + self.log_potential_matrix.shape[-2:]
and data.shape
!= (num_factors, np.sum(self.log_potential_matrix.shape[-2:]))
and data.shape != self.log_potential_matrix.shape[-2:]
):
raise ValueError(
"data should be of shape"
f" {(num_factors,) + self.log_potential_matrix.shape[-2:]} or"
f" {(num_factors, np.sum(self.log_potential_matrix.shape[-2:]))} or"
f" {self.log_potential_matrix.shape[-2:]}. Got {data.shape}."
)
if data.shape == self.log_potential_matrix.shape[-2:]:
flat_data = jnp.tile(jax.device_put(data).flatten(), num_factors)
else:
flat_data = jax.device_put(data).flatten()
return flat_data
def unflatten(
self, flat_data: Union[np.ndarray, jnp.ndarray]
) -> Union[np.ndarray, jnp.ndarray]:
"""Function that recovers meaningful structured data from internal flat data array.
Args:
flat_data: Internal flat data array.
Returns:
Meaningful structured data. Should be an array of shape
(num_factors, var0_num_states, var1_num_states) (for log potential
matrices)
or (num_factors, var0_num_states + var1_num_states) (for ftov messages)
or (var0_num_states, var1_num_states) (for shared log potential matrix).
Raises:
ValueError if:
(1) flat_data is not a 1D array
(2) flat_data is not of the right shape
"""
if flat_data.ndim != 1:
raise ValueError(
f"Can only unflatten 1D array. Got a {flat_data.ndim}D array."
)
assert isinstance(self.log_potential_matrix, np.ndarray)
num_factors = len(self.factors)
if flat_data.size == num_factors * np.prod(
self.log_potential_matrix.shape[-2:]
):
data = flat_data.reshape(
(num_factors,) + self.log_potential_matrix.shape[-2:]
)
elif flat_data.size == num_factors * np.sum(
self.log_potential_matrix.shape[-2:]
):
data = flat_data.reshape(
(num_factors, np.sum(self.log_potential_matrix.shape[-2:]))
)
else:
raise ValueError(
"flat_data should be compatible with shape"
f" {(num_factors,) + self.log_potential_matrix.shape[-2:]} or"
f" {(num_factors, np.sum(self.log_potential_matrix.shape[-2:]))}. Got"
f" {flat_data.shape}."
)
return data
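# A minimal usage sketch (illustrative; `grid` is an assumed VarGroup of
# 3-state variables arranged on a 5x5 grid): a shared Potts-style potential
# favouring agreement between horizontal neighbours.
#
#   log_potential_matrix = 1.0 * np.eye(3)
#   smoothness_factors = PairwiseFactorGroup(
#       variables_for_factors=[
#           [grid[i, j], grid[i, j + 1]] for i in range(5) for j in range(4)
#       ],
#       log_potential_matrix=log_potential_matrix,
#   )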
| PGMax-main | pgmax/fgroup/enum.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines PoolFactorGroup."""
import collections
import dataclasses
from typing import Any, FrozenSet, Optional, OrderedDict, Type
import numpy as np
from pgmax import factor
from pgmax.factor import pool
from pgmax.fgroup import fgroup
@dataclasses.dataclass(frozen=True, eq=False)
class PoolFactorGroup(fgroup.FactorGroup):
"""Class to represent a group of PoolFactors."""
factor_configs: Optional[np.ndarray] = dataclasses.field(
init=False,
default=None,
)
factor_type: Type[factor.Factor] = dataclasses.field(
init=False,
default=pool.PoolFactor,
)
# pylint: disable=g-complex-comprehension
def _get_variables_to_factors(
self,
) -> OrderedDict[FrozenSet[Any], pool.PoolFactor]:
"""Function that generates a dictionary mapping set of connected variables to factors.
This function is only called on demand when the user requires it.
Returns:
A dictionary mapping all possible sets of connected variables to different
factors.
"""
variables_to_factors = collections.OrderedDict(
[
(
frozenset(variables_for_factor),
pool.PoolFactor(variables=variables_for_factor),
)
for variables_for_factor in self.variables_for_factors
]
)
return variables_to_factors
| PGMax-main | pgmax/fgroup/pool.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from setuptools import setup
REQUIRED_PACKAGES = (
"absl-py>=0.12.0",
"numpy>=1.16.4",
"typing>=3.7.4.3",
"scipy>=1.7.1",
"open-spiel>=1.0.1",
"tensorflow>=2.6.0",
"tensorflow-datasets>=4.4.0",
"jax==0.2.20",
"jaxlib==0.1.71"
)
LONG_DESCRIPTION = "\n".join([(
"A suite of 17 datasets with phase space, high dimensional (visual) "
"observations and other measurements where appropriate, based on "
"physical systems exhibiting Hamiltonian dynamics."
)])
setup(
name="dm_hamiltonian_dynamics_suite",
version="0.0.1",
description="A collection of 17 datasets based on Hamiltonian physical "
"systems.",
long_description=LONG_DESCRIPTION,
url="https://github.com/deepmind/dm_hamiltonian_dynamics_suite",
author="DeepMind",
packages=[
"dm_hamiltonian_dynamics_suite",
"dm_hamiltonian_dynamics_suite.hamiltonian_systems",
"dm_hamiltonian_dynamics_suite.molecular_dynamics",
"dm_hamiltonian_dynamics_suite.multiagent_dynamics",
],
install_requires=REQUIRED_PACKAGES,
platforms=["any"],
license="Apache License, Version 2.0",
)
| dm_hamiltonian_dynamics_suite-master | setup.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to generate datasets from the configs in datasets.py."""
from absl import app
from absl import flags
from dm_hamiltonian_dynamics_suite import datasets
from dm_hamiltonian_dynamics_suite.molecular_dynamics import generate_dataset
import jax
flags.DEFINE_string("folder", None,
"The folder where to store the datasets.")
flags.DEFINE_string("dataset", None,
"The dataset from datasets.py to use, or 'molecular_dynamics' to "
"process a LAMMPS trajectory file.")
flags.DEFINE_string("lammps_file", None,
"For dataset='molecular_dynamics' this should be the "
"LAMMPS trajectory file containing a sequence of timesteps "
"obtained from a MD simulation.")
flags.DEFINE_float("dt", None, "The delta time between two observations.")
flags.DEFINE_integer("num_steps", None, "The number of steps to simulate.")
flags.DEFINE_integer("steps_per_dt", 10,
"How many internal steps to do per single observation step.")
flags.DEFINE_integer("num_train", None,
"The number of training examples to generate.")
flags.DEFINE_integer("num_test", None,
"The number of test examples to generate.")
flags.DEFINE_boolean("overwrite", False, "Overwrites previous data.")
flags.mark_flag_as_required("folder")
flags.mark_flag_as_required("dataset")
flags.mark_flag_as_required("dt")
flags.mark_flag_as_required("num_steps")
flags.mark_flag_as_required("num_train")
flags.mark_flag_as_required("num_test")
FLAGS = flags.FLAGS
def main(argv):
if len(argv) > 1:
raise ValueError(f"Unexpected args: {argv[1:]}")
if FLAGS.dataset == "molecular_dynamics":
generate_dataset.generate_lammps_dataset(
lammps_file=FLAGS.lammps_file,
folder=FLAGS.folder,
dt=FLAGS.dt,
num_steps=FLAGS.num_steps,
num_train=FLAGS.num_train,
num_test=FLAGS.num_test,
shuffle=FLAGS.shuffle,
seed=FLAGS.seed,
overwrite=FLAGS.overwrite,
)
else:
datasets.generate_full_dataset(
folder=FLAGS.folder,
dataset=FLAGS.dataset,
dt=FLAGS.dt,
num_steps=FLAGS.num_steps,
steps_per_dt=FLAGS.steps_per_dt,
num_train=FLAGS.num_train,
num_test=FLAGS.num_test,
overwrite=FLAGS.overwrite
)
if __name__ == "__main__":
jax.config.update("jax_enable_x64", True)
app.run(main)
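# Example invocation (a sketch; flag values are illustrative only):
#
#   python generate_dataset.py --folder=/tmp/hml_data --dataset=mass_spring \
#     --dt=0.05 --num_steps=256 --steps_per_dt=10 \
#     --num_train=5000 --num_test=200 --overwrite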
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/generate_dataset.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing dataset configurations used for generation and some utility functions."""
import functools
import os
import shutil
from typing import Callable, Mapping, Any, TextIO, Generator, Tuple, Optional
from absl import logging
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import ideal_double_pendulum
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import ideal_mass_spring
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import ideal_pendulum
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import n_body
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import utils
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
PipelineOutput = Optional[Tuple[
Tuple[Mapping[str, jnp.ndarray], ...],
Tuple[Mapping[str, jnp.ndarray], ...],
]]
try:
from dm_hamiltonian_dynamics_suite.multiagent_dynamics import game_dynamics # pylint: disable=g-import-not-at-top
_OPEN_SPIEL_INSTALLED = True
except ModuleNotFoundError:
_OPEN_SPIEL_INSTALLED = False
def open_spiel_available() -> bool:
return _OPEN_SPIEL_INSTALLED
def set_up_folder(folder: str, overwrite: bool) -> None:
"""Sets up the folder needed for the dataset (optionally clearing it)."""
if os.path.exists(folder):
if overwrite:
shutil.rmtree(folder)
os.makedirs(folder)
else:
os.makedirs(folder)
def save_features(
file: TextIO,
example_dict: Mapping[str, Any],
prefix: str = ""
) -> None:
"""Saves the features file used for loading."""
for k, v in example_dict.items():
if isinstance(v, dict):
save_features(file, v, prefix=f"{prefix}{k}/")
else:
if isinstance(v, tf.Tensor):
v = v.numpy()
if isinstance(v, (np.ndarray, jnp.ndarray)):
# int32 are promoted to int64
if v.dtype == np.int32:
file.write(f"{prefix}{k}, {v.shape}, {np.int64}\n")
else:
file.write(f"{prefix}{k}, {v.shape}, {v.dtype}\n")
else:
raise NotImplementedError(f"Currently the only supported feature types "
f"are tf.Tensor, np.ndarray and jnp.ndarray. "
f"Encountered value of type {type(v)}.")
def encode_example(example_dict: Mapping[str, Any]) -> Mapping[str, Any]:
"""Encodes a single trajectory into a TFRecord example."""
result_dict = dict()
for k, v in example_dict.items():
if isinstance(v, tf.Tensor):
v = v.numpy()
if isinstance(v, dict):
for ki, vi in encode_example(v).items():
result_dict[f"{k}/{ki}"] = vi
elif isinstance(v, (np.ndarray, jnp.ndarray)):
if v.dtype == np.uint8:
# We encode images to png
if v.ndim == 4:
# Since encode_png accepts only a single image for a batch of images
# we just stack them over their first axis.
v = v.reshape((-1,) + v.shape[-2:])
image_string = tf.image.encode_png(v).numpy()
result_dict[k] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=[image_string]))
elif v.dtype == np.int32:
# int32 are promoted to int64
value = v.reshape([-1]).astype(np.int64)
result_dict[k] = tf.train.Feature(
int64_list=tf.train.Int64List(value=value))
else:
# Since tf.Records do not support reading float64, here for any values
# we interpret them as int64 and store them in this format, in order
# when reading to be able to recover the float64 values.
value = v.reshape([-1]).view(np.int64)
result_dict[k] = tf.train.Feature(
int64_list=tf.train.Int64List(value=value))
else:
raise NotImplementedError(f"Currently the only supported feature types "
f"are tf.Tensor, np.ndarray and jnp.ndarray. "
f"Encountered value of type {type(v)}.")
return result_dict
def transform_dataset(
generator: Generator[Mapping[str, Any], None, None],
destination_folder: str,
prefix: str,
overwrite: bool
) -> None:
"""Copies the dataset from the source folder to the destination as a TFRecord dataset."""
set_up_folder(destination_folder, overwrite)
features_path = os.path.join(destination_folder, "features.txt")
features_saved = False
file_path = os.path.join(destination_folder, f"{prefix}.tfrecord")
if os.path.exists(file_path):
if not overwrite:
logging.info("The file with prefix %s already exist. Skipping.", prefix)
# We assume that the features file must be present in this case.
return
else:
logging.info("The file with prefix %s already exist and overwrite=True."
" Deleting.", prefix)
os.remove(file_path)
with tf.io.TFRecordWriter(file_path) as writer:
for element in generator:
if not features_saved:
with open(features_path, "w") as f:
save_features(f, element)
features_saved = True
example = tf.train.Example(features=tf.train.Features(
feature=encode_example(element)))
writer.write(example.SerializeToString())
def generate_sample(
index: int,
system: n_body.hamiltonian.HamiltonianSystem,
dt: float,
num_steps: int,
steps_per_dt: int
) -> Mapping[str, jnp.ndarray]:
"""Simulates a single trajectory of the system."""
  seed = np.random.randint(0, 2 ** 32 - 1)
prng_key = jax.random.fold_in(jax.random.PRNGKey(seed), index)
total_steps = num_steps * steps_per_dt
total_dt = dt / steps_per_dt
result = system.generate_and_render_dt(
num_trajectories=1,
rng_key=prng_key,
t0=0.0,
dt=total_dt,
num_steps=total_steps)
sub_sample_index = np.linspace(0.0, total_steps, num_steps + 1)
sub_sample_index = sub_sample_index.astype("int64")
def sub_sample(x):
if x.ndim > 1 and x.shape[1] == total_steps + 1:
return x[0, sub_sample_index]
else:
return x
result = jax.tree_map(sub_sample, result)
for k in result.keys():
if "image" in k:
result[k] = (result[k] * 255.0).astype("uint8")
return result
def create_pipeline(
generate: Callable[[int], Mapping[str, jnp.ndarray]],
output_path: str,
num_train: int,
num_test: int,
return_generated_examples: bool = False,
) -> Callable[[], PipelineOutput]:
"""Runs the generation pipeline for the HML datasets."""
def pipeline() -> PipelineOutput:
train_examples = list()
test_examples = list()
with open(f"{output_path}/features.txt", "w") as f:
save_features(f, generate(0))
with tf.io.TFRecordWriter(f"{output_path}/train.tfrecord") as writer:
for i in range(num_train):
example = generate(i)
if return_generated_examples:
train_examples.append(example)
example = tf.train.Example(features=tf.train.Features(
feature=encode_example(example)))
writer.write(example.SerializeToString())
with tf.io.TFRecordWriter(f"{output_path}/test.tfrecord") as writer:
for i in range(num_test):
example = generate(num_train + i)
if return_generated_examples:
test_examples.append(example)
example = tf.train.Example(features=tf.train.Features(
feature=encode_example(example)))
writer.write(example.SerializeToString())
if return_generated_examples:
return tuple(train_examples), tuple(test_examples)
return pipeline
def generate_full_dataset(
folder: str,
dataset: str,
dt: float,
num_steps: int,
steps_per_dt: int,
num_train: int,
num_test: int,
overwrite: bool,
return_generated_examples: bool = False,
) -> PipelineOutput:
"""Runs the data generation."""
dt_str = str(dt).replace(".", "_")
folder = os.path.join(folder, dataset.lower() + f"_dt_{dt_str}")
set_up_folder(folder, overwrite)
  if dataset.upper() not in globals():
    raise ValueError(f"Unknown dataset: {dataset}.")
  cls, config = globals()[dataset.upper()]
system = cls(**config())
generate = functools.partial(
generate_sample,
system=system,
dt=dt,
num_steps=num_steps,
steps_per_dt=steps_per_dt)
pipeline = create_pipeline(
generate, folder, num_train, num_test, return_generated_examples)
return pipeline()
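# Example usage (a minimal sketch; the output folder below is hypothetical):
#
#   generate_full_dataset(
#       folder="/tmp/hml_datasets",
#       dataset="pendulum_colors",
#       dt=0.05,
#       num_steps=256,
#       steps_per_dt=10,
#       num_train=50,
#       num_test=10,
#       overwrite=True,
#   )
#
# The `dataset` argument is upper-cased and matched against the configuration
# tuples defined below (e.g. PENDULUM_COLORS).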
MASS_SPRING = (
ideal_mass_spring.IdealMassSpring,
lambda: dict( # pylint:disable=g-long-lambda
k_range=utils.BoxRegion(2.0, 2.0),
m_range=utils.BoxRegion(0.5, 0.5),
radius_range=utils.BoxRegion(0.1, 1.0),
uniform_annulus=False,
randomize_canvas_location=False,
randomize_x=False,
num_colors=1,
)
)
MASS_SPRING_COLORS = (
ideal_mass_spring.IdealMassSpring,
lambda: dict( # pylint:disable=g-long-lambda
k_range=utils.BoxRegion(2.0, 2.0),
m_range=utils.BoxRegion(0.2, 1.0),
radius_range=utils.BoxRegion(0.1, 1.0),
num_colors=6,
)
)
MASS_SPRING_COLORS_FRICTION = (
ideal_mass_spring.IdealMassSpring,
lambda: dict( # pylint:disable=g-long-lambda
k_range=utils.BoxRegion(2.0, 2.0),
m_range=utils.BoxRegion(0.2, 1.0),
radius_range=utils.BoxRegion(0.1, 1.0),
num_colors=6,
friction=0.05,
),
)
PENDULUM = (
ideal_pendulum.IdealPendulum,
lambda: dict( # pylint:disable=g-long-lambda
m_range=utils.BoxRegion(0.5, 0.5),
g_range=utils.BoxRegion(3.0, 3.0),
l_range=utils.BoxRegion(1.0, 1.0),
radius_range=utils.BoxRegion(1.3, 2.3),
uniform_annulus=False,
randomize_canvas_location=False,
num_colors=1,
)
)
PENDULUM_COLORS = (
ideal_pendulum.IdealPendulum,
lambda: dict( # pylint:disable=g-long-lambda
m_range=utils.BoxRegion(0.5, 1.5),
g_range=utils.BoxRegion(3.0, 4.0),
l_range=utils.BoxRegion(.5, 1.0),
radius_range=utils.BoxRegion(1.3, 2.3),
num_colors=6,
)
)
PENDULUM_COLORS_FRICTION = (
ideal_pendulum.IdealPendulum,
lambda: dict( # pylint:disable=g-long-lambda
m_range=utils.BoxRegion(0.5, 1.5),
g_range=utils.BoxRegion(3.0, 4.0),
l_range=utils.BoxRegion(.5, 1.0),
radius_range=utils.BoxRegion(1.3, 2.3),
num_colors=6,
friction=0.05,
)
)
DOUBLE_PENDULUM = (
ideal_double_pendulum.IdealDoublePendulum,
lambda: dict( # pylint:disable=g-long-lambda
m_range=utils.BoxRegion(0.5, 0.5),
g_range=utils.BoxRegion(3.0, 3.0),
l_range=utils.BoxRegion(1.0, 1.0),
radius_range=utils.BoxRegion(1.3, 2.3),
uniform_annulus=False,
randomize_canvas_location=False,
num_colors=2,
)
)
DOUBLE_PENDULUM_COLORS = (
ideal_double_pendulum.IdealDoublePendulum,
lambda: dict( # pylint:disable=g-long-lambda
m_range=utils.BoxRegion(0.4, 0.6),
g_range=utils.BoxRegion(2.5, 4.0),
l_range=utils.BoxRegion(0.75, 1.0),
radius_range=utils.BoxRegion(1.0, 2.5),
num_colors=6,
)
)
DOUBLE_PENDULUM_COLORS_FRICTION = (
ideal_double_pendulum.IdealDoublePendulum,
lambda: dict( # pylint:disable=g-long-lambda
m_range=utils.BoxRegion(0.4, 0.6),
g_range=utils.BoxRegion(2.5, 4.0),
l_range=utils.BoxRegion(0.75, 1.0),
radius_range=utils.BoxRegion(1.0, 2.5),
num_colors=6,
friction=0.05
),
)
TWO_BODY = (
n_body.TwoBodySystem,
lambda: dict( # pylint:disable=g-long-lambda
m_range=utils.BoxRegion(1.0, 1.0),
g_range=utils.BoxRegion(1.0, 1.0),
radius_range=utils.BoxRegion(0.5, 1.5),
provided_canvas_bounds=utils.BoxRegion(-2.75, 2.75),
randomize_canvas_location=False,
num_colors=2,
)
)
TWO_BODY_COLORS = (
n_body.TwoBodySystem,
lambda: dict( # pylint:disable=g-long-lambda
m_range=utils.BoxRegion(0.5, 1.5),
g_range=utils.BoxRegion(0.5, 1.5),
radius_range=utils.BoxRegion(0.5, 1.5),
provided_canvas_bounds=utils.BoxRegion(-5.0, 5.0),
randomize_canvas_location=False,
num_colors=6,
)
)
def no_open_spiel_func(*_, **__):
raise ValueError("You must download and install `open_spiel` first in "
"order to use the game_dynamics datasets. See "
"https://github.com/deepmind/open_spiel for instructions"
" how to do this.")
if not open_spiel_available():
MATCHING_PENNIES = (no_open_spiel_func, dict)
ROCK_PAPER_SCISSORS = (no_open_spiel_func, dict)
else:
MATCHING_PENNIES = (game_dynamics.ZeroSumGame,
lambda: dict(game_name="matrix_mp"))
ROCK_PAPER_SCISSORS = (game_dynamics.ZeroSumGame,
lambda: dict(game_name="matrix_rps"))
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/datasets.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for loading the Hamiltonian datasets."""
import functools
import os
from typing import Optional, Mapping, Any, Tuple, Sequence, Callable, Union, TypeVar
import jax
import tensorflow as tf
import tensorflow_datasets as tfds
T = TypeVar("T")
def filter_based_on_keys(
example: Mapping[str, T],
keys_to_preserve: Sequence[str],
single_key_return_array: bool = False
) -> Union[T, Mapping[str, T]]:
"""Filters the contents of the mapping, to return only the keys given in `keys_to_preserve`."""
if not keys_to_preserve:
raise ValueError("You must provide at least one key to preserve.")
if len(keys_to_preserve) == 1 and single_key_return_array:
return example[keys_to_preserve[0]]
elif single_key_return_array:
raise ValueError(f"You have provided {len(keys_to_preserve)}>1 keys to "
f"preserve and have also set "
f"single_key_return_array=True.")
return {k: example[k] for k in keys_to_preserve}
def preprocess_batch(
batch: Mapping[str, Any],
num_local_devices: int,
multi_device: bool,
sub_sample_length: Optional[int],
dtype: str = "float32"
) -> Mapping[str, Any]:
"""Function to preprocess the data for a batch.
  This performs three functions:
  1. If 'sub_sample_length' is not None, it randomly subsamples every example
     along the second (time) axis to return an array of the requested length.
     Note that this assumes that all arrays have the same length.
  2. Converts all arrays to the provided data type - compressed image arrays
     via `tf.image.convert_image_dtype` and all other arrays via `tf.cast`.
  3. Reshapes the arrays based on the number of local devices, if
     `multi_device` is True.
Args:
batch: Dictionary with the full batch.
num_local_devices: The number of local devices.
multi_device: Whether to prepare the batch for multi device training.
sub_sample_length: The sub-sampling length requested.
dtype: String for the dtype, must be a floating-point type.
Returns:
The preprocessed batch.
"""
if dtype not in ("float32", "float64"):
raise ValueError("The provided dtype must be a floating point dtype.")
tensor = batch.get("image", batch.get("x", None))
if tensor is None:
raise ValueError("We need either the key 'image' or 'x' to be present in "
"the batch provided.")
if not isinstance(tensor, tf.Tensor):
raise ValueError(f"Expecting the value for key 'image' or 'x' to be a "
f"tf.Tensor, instead got {type(tensor)}.")
# `n` here represents the batch size
n = tensor.shape[0] or tf.shape(tensor)[0]
# `t` here represents number of time steps in the batch
t = tensor.shape[1]
if sub_sample_length is not None:
# Sample random index for each example in the batch
limit = t - sub_sample_length + 1
start = tf.random.uniform(shape=[n], maxval=limit, dtype="int32")
indices = tf.range(sub_sample_length)[None, :, None] + start[:, None, None]
def index(x):
"""Indexes every array in the batch according to the sampled indices and length, if its second dimensions is equal to `t`."""
if x.shape.rank > 1 and x.shape[1] == t:
if isinstance(n, tf.Tensor):
shape = [None, sub_sample_length] + list(x.shape[2:])
else:
shape = [n, sub_sample_length] + list(x.shape[2:])
x = tf.gather_nd(x, indices, batch_dims=1)
x.set_shape(shape)
return x
batch = jax.tree_map(index, batch)
def convert_fn(x):
"""Converts the value of `x` to the provided precision dtype.
Integer valued arrays, with data type different from int32 and int64 are
assumed to represent compressed images and are converted via
`tf.image.convert_image_dtype`. For any other data types (float or int)
their type is preserved, but their precision is changed based on the
target `dtype`. For instance, if `dtype=float32` the float64 variables are
converted to float32 and int64 values are converted to int32.
Args:
x: The input array.
Returns:
The converted output array.
"""
if x.dtype == tf.int64:
return tf.cast(x, "int32") if dtype == "float32" else x
elif x.dtype == tf.int32:
return tf.cast(x, "int64") if dtype == "float64" else x
elif x.dtype == tf.float64 or x.dtype == tf.float32:
return tf.cast(x, dtype=dtype)
else:
return tf.image.convert_image_dtype(x, dtype=dtype)
batch = jax.tree_map(convert_fn, batch)
if not multi_device:
return batch
def reshape_for_jax_pmap(x):
"""Reshapes values such that their leading dimension is the number of local devices."""
return tf.reshape(x, [num_local_devices, -1] + x.shape[1:].as_list())
return jax.tree_map(reshape_for_jax_pmap, batch)
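# A minimal sketch of the shape bookkeeping above (batch size, device count
# and lengths are hypothetical): for a batch with batch["image"] of shape
# [16, 100, 32, 32, 3],
#
#   preprocess_batch(batch, num_local_devices=8, multi_device=True,
#                    sub_sample_length=20)
#
# sub-samples 20 consecutive time steps per example and splits the leading
# axis across devices, giving an "image" entry of shape [8, 2, 20, 32, 32, 3].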
def load_filenames_and_parse_fn(
path: str,
tfrecord_prefix: str
) -> Tuple[Tuple[str], Callable[[str], Mapping[str, Any]]]:
"""Returns the file names and read_fn based on the number of shards."""
file_name = os.path.join(path, f"{tfrecord_prefix}.tfrecord")
if not os.path.exists(file_name):
raise ValueError(f"The dataset file {file_name} does not exist.")
features_file = os.path.join(path, "features.txt")
if not os.path.exists(features_file):
raise ValueError(f"The dataset features file {features_file} does not "
f"exist.")
with open(features_file, "r") as f:
dtype_dict = dict()
shapes_dict = dict()
parsing_description = dict()
for line in f:
key = line.split(", ")[0]
shape_string = line.split("(")[1].split(")")[0]
shapes_dict[key] = tuple(int(s) for s in shape_string.split(",") if s)
dtype_dict[key] = line.split(", ")[-1][:-1]
if dtype_dict[key] == "uint8":
parsing_description[key] = tf.io.FixedLenFeature([], tf.string)
elif dtype_dict[key] in ("float32", "float64"):
parsing_description[key] = tf.io.VarLenFeature(tf.int64)
else:
parsing_description[key] = tf.io.VarLenFeature(dtype_dict[key])
def parse_fn(example_proto: str) -> Mapping[str, Any]:
raw = tf.io.parse_single_example(example_proto, parsing_description)
parsed = dict()
for name, dtype in dtype_dict.items():
value = raw[name]
if dtype == "uint8":
value = tf.image.decode_png(value)
else:
value = tf.sparse.to_dense(value)
if dtype in ("float32", "float64"):
value = tf.bitcast(value, type=dtype)
value = tf.reshape(value, shapes_dict[name])
if "/" in name:
k1, k2 = name.split("/")
if k1 not in parsed:
parsed[k1] = dict()
parsed[k1][k2] = value
else:
parsed[name] = value
return parsed
return (file_name,), parse_fn
def load_parsed_dataset(
path: str,
tfrecord_prefix: str,
num_shards: int,
shard_index: Optional[int] = None,
keys_to_preserve: Optional[Sequence[str]] = None
) -> tf.data.Dataset:
"""Loads a dataset and shards it based on jax devices."""
  shard_index = jax.process_index() if shard_index is None else shard_index
file_names, parse_fn = load_filenames_and_parse_fn(
path=path,
tfrecord_prefix=tfrecord_prefix,
)
ds = tf.data.TFRecordDataset(file_names)
threads = max(1, os.cpu_count() - 4)
options = tf.data.Options()
options.threading.private_threadpool_size = threads
options.threading.max_intra_op_parallelism = 1
ds = ds.with_options(options)
# Shard if we don't shard by files
if num_shards != 1:
ds = ds.shard(num_shards, shard_index)
# Parse the examples one by one
if keys_to_preserve is not None:
# Optionally also filter them based on the keys provided
def parse_filter(example_proto):
example = parse_fn(example_proto)
return filter_based_on_keys(example, keys_to_preserve=keys_to_preserve)
ds = ds.map(parse_filter, num_parallel_calls=tf.data.experimental.AUTOTUNE)
else:
ds = ds.map(parse_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return ds
def load_dataset(
path: str,
tfrecord_prefix: str,
sub_sample_length: Optional[int],
per_device_batch_size: int,
num_epochs: Optional[int],
drop_remainder: bool,
multi_device: bool = False,
num_shards: int = 1,
shard_index: Optional[int] = None,
keys_to_preserve: Optional[Sequence[str]] = None,
shuffle: bool = False,
cache: bool = True,
shuffle_buffer: Optional[int] = 10000,
dtype: str = "float32",
seed: Optional[int] = None
) -> tf.data.Dataset:
"""Creates a tensorflow.Dataset pipeline from an TFRecord dataset.
Args:
path: The path to the dataset.
tfrecord_prefix: The dataset prefix.
sub_sample_length: The length of the sequences that will be returned.
If this is `None` the dataset will return full length sequences.
If this is an `int` it will subsample each sequence uniformly at random
for a sequence of the provided size. Note that all examples in the dataset
must be at least this long, otherwise the tensorflow code might crash.
per_device_batch_size: The batch size to use on a single device. The actual
batch size is this multiplied by the number of devices.
num_epochs: The number of times to repeat the full dataset.
    drop_remainder: If the number of examples in the dataset is not evenly
      divisible by the batch size, whether to drop the remaining examples at
      the end of each epoch, or to construct a final smaller batch.
multi_device: Whether to load the dataset prepared for multi-device use
(e.g. pmap) with leading dimension equal to the number of local devices.
num_shards: If you want to shard the dataset, you must specify how many
shards you want to use.
shard_index: The shard index for this host. If `None` will use
`jax.process_index()`.
    keys_to_preserve: Explicit specification of which keys to keep from the
      dataset.
shuffle: Whether to shuffle examples in the dataset.
cache: Whether to use cache in the tf.Dataset.
shuffle_buffer: Size of the shuffling buffer.
dtype: What data type to convert the data to.
seed: Seed to pass to the loader.
Returns:
A tensorflow dataset object.
"""
per_host_batch_size = per_device_batch_size * jax.local_device_count()
# Preprocessing function
batch_fn = functools.partial(
preprocess_batch,
num_local_devices=jax.local_device_count(),
multi_device=multi_device,
sub_sample_length=sub_sample_length,
dtype=dtype)
with tf.name_scope("dataset"):
ds = load_parsed_dataset(
path=path,
tfrecord_prefix=tfrecord_prefix,
num_shards=num_shards,
shard_index=shard_index,
keys_to_preserve=keys_to_preserve,
)
if cache:
ds = ds.cache()
if shuffle:
ds = ds.shuffle(shuffle_buffer, seed=seed)
ds = ds.repeat(num_epochs)
ds = ds.batch(per_host_batch_size, drop_remainder=drop_remainder)
ds = ds.map(batch_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return ds.prefetch(tf.data.experimental.AUTOTUNE)
def dataset_as_iter(dataset_func, *args, **kwargs):
def iterable_func():
yield from tfds.as_numpy(dataset_func(*args, **kwargs))
return iterable_func
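# Example usage (a minimal sketch; the dataset path below is hypothetical and
# assumes data generated with `datasets.generate_full_dataset`):
#
#   ds_iter = dataset_as_iter(
#       load_dataset,
#       path="/tmp/hml_datasets/pendulum_colors_dt_0_05",
#       tfrecord_prefix="train",
#       sub_sample_length=20,
#       per_device_batch_size=4,
#       num_epochs=1,
#       drop_remainder=True,
#       shuffle=True,
#   )
#   for batch in ds_iter():
#     ...  # `batch` is a nest of numpy arrays.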
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/load_datasets.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the generation and loading of datasets."""
import os
from absl.testing import absltest
from absl.testing import parameterized
from dm_hamiltonian_dynamics_suite import datasets
from dm_hamiltonian_dynamics_suite import load_datasets
import jax
from jax import numpy as jnp
import tensorflow as tf
DATASETS_TO_TEST = [
"mass_spring",
"pendulum_colors",
"double_pendulum_colors",
"two_body_colors",
]
if datasets.open_spiel_available():
DATASETS_TO_TEST += [
"matching_pennies",
"rock_paper_scissors",
]
class TestToyDataset(parameterized.TestCase):
"""Test class for the functions in `tag_graph_matcher.py`."""
def compare_structures_all_the_same(self, example, batched_example):
"""Compares that the two examples are identical in structure and value."""
self.assertEqual(
jax.tree_structure(example),
jax.tree_structure(batched_example),
"Structures should be the same."
)
    # The generated example's image has not been dtype-converted, so convert
    # it here to match the loaded batch before comparing.
example["image"] = tf.image.convert_image_dtype(
example["image"], dtype=batched_example["image"].dtype).numpy()
for v1, v2 in zip(jax.tree_leaves(example),
jax.tree_leaves(batched_example)):
self.assertEqual(v1.dtype, v2.dtype, "Dtypes should be the same.")
self.assertEqual((1,) + v1.shape, v2.shape, "Shapes should be the same.")
self.assertTrue(jnp.allclose(v1, v2[0]), "Values should be the same.")
@parameterized.parameters(DATASETS_TO_TEST)
def test_dataset(
self,
dataset,
folder: str = "/tmp/dm_hamiltonian_dynamics_suite/tests/",
dt: float = 0.1,
num_steps: int = 100,
steps_per_dt: int = 10,
num_train: int = 10,
num_test: int = 10,
):
"""Checks that the dataset generation and loading are working correctly."""
# Generate the dataset
train_examples, test_examples = datasets.generate_full_dataset(
folder=folder,
dataset=dataset,
dt=dt,
num_steps=num_steps,
steps_per_dt=steps_per_dt,
num_train=num_train,
num_test=num_test,
overwrite=True,
return_generated_examples=True,
)
# Load train dataset
dataset_path = dataset.lower() + "_dt_" + str(dt).replace(".", "_")
ds = load_datasets.dataset_as_iter(
load_datasets.load_dataset,
path=os.path.join(folder, dataset_path),
tfrecord_prefix="train",
sub_sample_length=None,
per_device_batch_size=1,
num_epochs=1,
drop_remainder=False,
dtype="float64"
)
examples = tuple(x for x in ds())
self.assertEqual(
len(train_examples), len(examples),
"Number of training examples not the same."
)
# Compare individual examples
for example_1, example_2 in zip(train_examples, examples):
self.compare_structures_all_the_same(example_1, example_2)
# Load test dataset
ds = load_datasets.dataset_as_iter(
load_datasets.load_dataset,
path=os.path.join(folder, dataset_path),
tfrecord_prefix="test",
sub_sample_length=None,
per_device_batch_size=1,
num_epochs=1,
drop_remainder=False,
dtype="float64"
)
examples = tuple(x for x in ds())
self.assertEqual(
len(test_examples), len(examples),
"Number of test examples not the same."
)
# Compare individual examples
for example_1, example_2 in zip(test_examples, examples):
self.compare_structures_all_the_same(example_1, example_2)
if __name__ == "__main__":
jax.config.update("jax_enable_x64", True)
absltest.main()
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/tests/test_datasets.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the PhaseSpace class."""
import functools
from typing import Callable, Type, Union
import jax
from jax import numpy as jnp
from jax import tree_util
class PhaseSpace(object):
"""Holds a pair of position and momentum for a Hamiltonian System."""
def __init__(self, position: jnp.ndarray, momentum: jnp.ndarray):
self._position = position
self._momentum = momentum
@property
def position(self) -> jnp.ndarray:
"""The position element of the phase space."""
return self._position
@property
def momentum(self) -> jnp.ndarray:
"""The momentum element of the phase space."""
return self._momentum
@property
def q(self) -> jnp.ndarray:
"""A shorthand for the position element of the phase space."""
return self._position
@property
def p(self) -> jnp.ndarray:
"""A shorthand for the momentum element of the phase space."""
return self._momentum
@property
def single_state(self) -> jnp.ndarray:
"""Returns the concatenation of position and momentum."""
return jnp.concatenate([self.q, self.p], axis=-1)
@property
def ndim(self) -> int:
"""Returns the number of dimensions of the position array."""
return self.q.ndim
@classmethod
def from_state(cls: Type["PhaseSpace"], state: jnp.ndarray) -> "PhaseSpace":
q, p = jnp.split(state, 2, axis=-1)
return cls(position=q, momentum=p)
def __str__(self) -> str:
return f"{type(self).__name__}(q={self.position}, p={self.momentum})"
def __repr__(self) -> str:
return self.__str__()
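# A minimal sketch of the state <-> PhaseSpace round trip:
#
#   y = PhaseSpace.from_state(jnp.asarray([1.0, 2.0]))
#   # y.q == [1.0], y.p == [2.0], and y.single_state recovers [1.0, 2.0].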
class TangentPhaseSpace(PhaseSpace):
"""Represents the tangent space to PhaseSpace."""
def __add__(
self,
other: Union[PhaseSpace, "TangentPhaseSpace"],
) -> Union[PhaseSpace, "TangentPhaseSpace"]:
if isinstance(other, TangentPhaseSpace):
return TangentPhaseSpace(position=self.q + other.q,
momentum=self.p + other.p)
elif isinstance(other, PhaseSpace):
return PhaseSpace(position=self.q + other.q,
momentum=self.p + other.p)
else:
raise ValueError(f"Can not add TangentPhaseSpace and {type(other)}.")
def __radd__(
self,
other: Union[PhaseSpace, "TangentPhaseSpace"]
) -> Union[PhaseSpace, "TangentPhaseSpace"]:
return self.__add__(other)
def __mul__(self, other: jnp.ndarray) -> "TangentPhaseSpace":
return TangentPhaseSpace(position=self.q * other,
momentum=self.p * other)
def __rmul__(self, other):
return self.__mul__(other)
@classmethod
def zero(cls: Type["TangentPhaseSpace"]) -> "TangentPhaseSpace":
return cls(position=jnp.asarray(0.0), momentum=jnp.asarray(0.0))
HamiltonianFunction = Callable[
[
jnp.ndarray, # t
PhaseSpace, # y
],
jnp.ndarray # H(t, y)
]
SymplecticTangentFunction = Callable[
[
jnp.ndarray, # t
PhaseSpace # (q, p)
],
TangentPhaseSpace # (dH_dp, - dH_dq)
]
SymplecticTangentFunctionArray = Callable[
[
jnp.ndarray, # t
jnp.ndarray # (q, p)
],
jnp.ndarray # (dH_dp, - dH_dq)
]
def poisson_bracket_with_q_and_p(
f: HamiltonianFunction
) -> SymplecticTangentFunction:
"""Returns a function that computes the Poisson brackets {q,f} and {p,f}."""
def bracket(t: jnp.ndarray, y: PhaseSpace) -> TangentPhaseSpace:
# Use the summation trick for getting gradient
# Note that the first argument to the hamiltonian is t
grad = jax.grad(lambda *args: jnp.sum(f(*args)), argnums=1)(t, y)
return TangentPhaseSpace(position=grad.p, momentum=-grad.q)
return bracket
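# A minimal sketch (with hypothetical mass and spring constants) of how the
# bracket recovers Hamilton's equations dq/dt = dH/dp, dp/dt = -dH/dq for a
# harmonic oscillator H = p^2 / (2 m) + k q^2 / 2:
#
#   m, k = 1.0, 2.0
#   h = lambda t, y: y.p ** 2 / (2 * m) + k * y.q ** 2 / 2
#   vector_field = poisson_bracket_with_q_and_p(h)
#   y = PhaseSpace(position=jnp.ones([1]), momentum=jnp.zeros([1]))
#   dy = vector_field(jnp.asarray(0.0), y)
#   # dy.q == p / m == [0.0] and dy.p == -k * q == [-2.0]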
def transform_symplectic_tangent_function_using_array(
func: SymplecticTangentFunction
) -> SymplecticTangentFunctionArray:
@functools.wraps(func)
def wrapped(t: jnp.ndarray, state: jnp.ndarray) -> jnp.ndarray:
return func(t, PhaseSpace.from_state(state)).single_state
return wrapped
tree_util.register_pytree_node(
nodetype=PhaseSpace,
flatten_func=lambda y: ((y.q, y.p), None),
unflatten_func=lambda _, q_and_p: PhaseSpace(*q_and_p)
)
tree_util.register_pytree_node(
nodetype=TangentPhaseSpace,
flatten_func=lambda y: ((y.q, y.p), None),
unflatten_func=lambda _, q_and_p: TangentPhaseSpace(*q_and_p)
)
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/hamiltonian_systems/phase_space.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/hamiltonian_systems/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""N body."""
import abc
import functools
from typing import Any, Optional, Tuple
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import hamiltonian
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import utils
import jax
import jax.numpy as jnp
class NBodySystem(hamiltonian.TimeIndependentHamiltonianSystem):
"""An N-body system abstract class.
Parameters:
m_range - possible range of the particle mass
g_range - possible range of the gravitational force
provided_canvas_bounds - The canvas bounds for the given ranges
The Hamiltonian is:
- sum_i<j (g * m_i * m_j) / ||q_i-q_j|| + sum_i ||p_i||^2 / (2 * m_i)
Initial state parameters:
radius_range - The initial state is sampled from a disk in phase space with
radius in this range.
    uniform_annulus - Whether to sample uniformly over the annulus area or
      uniformly over the radius.
    randomize_canvas_location - Whether to randomize the location of the
      particles on the canvas when rendering.
"""
def __init__(
self,
n: int,
space_dims: int,
m_range: utils.BoxRegion,
g_range: utils.BoxRegion,
provided_canvas_bounds: utils.BoxRegion,
**kwargs):
super(NBodySystem, self).__init__(
system_dims=n * space_dims,
**kwargs,
)
self.n = n
self.space_dims = space_dims
self.m_range = m_range
self.g_range = g_range
self.provided_canvas_bounds = provided_canvas_bounds
render = functools.partial(utils.render_particles_trajectory,
canvas_limits=self.full_canvas_bounds(),
resolution=self.resolution,
num_colors=self.num_colors)
self._batch_render = jax.vmap(render)
def _hamiltonian(
self,
y: phase_space.PhaseSpace,
params: utils.Params,
**kwargs: Any
) -> jnp.ndarray:
assert len(params) == 2
m = params["m"]
g = params["g"]
q = y.q.reshape([-1, self.n, self.space_dims])
p = y.p.reshape([-1, self.n, self.space_dims])
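    # Pairwise squared distances via the Gram-matrix identity
    # ||q_i - q_j||^2 = ||q_i||^2 + ||q_j||^2 - 2 <q_i, q_j>.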
q_ij = jnp.matmul(q, jnp.swapaxes(q, axis1=-1, axis2=-2))
q_ii = jnp.diagonal(q_ij, axis1=-1, axis2=-2)
q_ij_norms_2 = q_ii[:, None, :] + q_ii[:, :, None] - 2.0 * q_ij
# Adding identity so that on the diagonal the norms are not 0
q_ij_norms = jnp.sqrt(q_ij_norms_2 + jnp.identity(self.n))
masses_ij = m[:, None, :] * m[:, :, None]
# Remove masses in the diagonal so that those potentials are 0
masses_ij = masses_ij - masses_ij * jnp.identity(self.n)[None]
# Compute pairwise interactions
products = g[:, None, None] * masses_ij / q_ij_norms
# Note that here we are summing both i->j and j->i hence the division by 2
potential = - products.sum(axis=(-2, -1)) / 2
kinetic = jnp.sum(p ** 2, axis=-1) / (2.0 * m)
kinetic = kinetic.sum(axis=-1)
return potential + kinetic
@abc.abstractmethod
def sample_y(
self,
num_samples: int,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> phase_space.PhaseSpace:
pass
def sample_params(
self,
num_samples: int,
rng_key: jnp.ndarray,
**kwargs: Any
) -> utils.Params:
key1, key2 = jax.random.split(rng_key)
m = jax.random.uniform(key1, [num_samples, self.n], minval=self.m_range.min,
maxval=self.m_range.max)
g = jax.random.uniform(key2, [num_samples], minval=self.g_range.min,
maxval=self.g_range.max)
return dict(m=m, g=g)
def simulate_analytically(
self,
y0: phase_space.PhaseSpace,
t0: utils.FloatArray,
t_eval: jnp.ndarray,
params: utils.Params,
**kwargs: Any
) -> Optional[phase_space.PhaseSpace]:
return None
def canvas_bounds(self) -> utils.BoxRegion:
return self.provided_canvas_bounds
def canvas_position(
self,
position: jnp.ndarray,
params: utils.Params
) -> jnp.ndarray:
return position
def render_trajectories(
self,
position: jnp.ndarray,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> Tuple[jnp.ndarray, utils.Params]:
n, _, d = position.shape
assert d == self.system_dims
assert len(params) == 2
key1, key2 = jax.random.split(rng_key, 2)
m = utils.expand_to_rank_right(params["m"], 2)
particles = position.reshape([n, -1, self.n, self.space_dims])
if self.randomize_canvas_location:
offset = jax.random.uniform(key1, shape=[n, self.space_dims])
offset = self.random_offset_bounds().convert_from_unit_interval(offset)
else:
offset = jnp.zeros(shape=[n, self.space_dims])
particles = particles + offset[:, None, None, :]
particles_radius = jnp.sqrt(m / jnp.pi)
if self.num_colors == 1:
color_index = jnp.zeros([n, self.n]).astype("int64")
else:
if self.num_colors < self.n:
raise ValueError("The number of colors must be at least the number of "
"objects or 1.")
color_index = utils.random_int_k_from_n(
key2,
num_samples=n,
n=self.num_colors,
k=self.n
)
images = self._batch_render(particles, particles_radius, color_index)
return images, dict(offset=offset, color_index=color_index)
class TwoBodySystem(NBodySystem):
"""N-body system with N = 2."""
def __init__(
self,
m_range: utils.BoxRegion,
g_range: utils.BoxRegion,
radius_range: utils.BoxRegion,
provided_canvas_bounds: utils.BoxRegion,
**kwargs):
self.radius_range = radius_range
super().__init__(n=2, space_dims=2,
m_range=m_range,
g_range=g_range,
provided_canvas_bounds=provided_canvas_bounds,
**kwargs)
def sample_y(
self,
num_samples: int,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> phase_space.PhaseSpace:
pos = jax.random.uniform(rng_key, [num_samples, self.n])
pos = self.radius_range.convert_from_unit_interval(pos)
r = jnp.sqrt(jnp.sum(pos ** 2, axis=-1))
vel = jnp.flip(pos, axis=-1) / (2 * r[..., None] ** 1.5)
vel = vel * jnp.asarray([1.0, -1.0]).reshape([1, 2])
pos = jnp.repeat(pos.reshape([num_samples, 1, -1]), repeats=self.n, axis=1)
vel = jnp.repeat(vel.reshape([num_samples, 1, -1]), repeats=self.n, axis=1)
pos = pos * jnp.asarray([1.0, -1.0]).reshape([1, 2, 1])
vel = vel * jnp.asarray([1.0, -1.0]).reshape([1, 2, 1])
pos = pos.reshape([num_samples, -1])
vel = vel.reshape([num_samples, -1])
return phase_space.PhaseSpace(position=pos, momentum=vel)
class ThreeBody2DSystem(NBodySystem):
"""N-body system with N = 3 in two dimensions."""
def __init__(
self,
m_range: utils.BoxRegion,
g_range: utils.BoxRegion,
radius_range: utils.BoxRegion,
provided_canvas_bounds: utils.BoxRegion,
**kwargs):
self.radius_range = radius_range
super().__init__(n=3, space_dims=2,
m_range=m_range,
g_range=g_range,
provided_canvas_bounds=provided_canvas_bounds,
**kwargs)
def sample_y(
self,
num_samples: int,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> phase_space.PhaseSpace:
theta = 2 * jnp.pi / 3
rot = jnp.asarray([[jnp.cos(theta), - jnp.sin(theta)],
[jnp.sin(theta), jnp.cos(theta)]])
p1 = 2 * jax.random.uniform(rng_key, [num_samples, 2]) - 1.0
r = jax.random.uniform(rng_key, [num_samples])
r = self.radius_range.convert_from_unit_interval(r)
p1 *= (r / jnp.linalg.norm(p1, axis=-1))[:, None]
p2 = jnp.matmul(p1, rot.T)
p3 = jnp.matmul(p2, rot.T)
p = jnp.concatenate([p1, p2, p3], axis=-1)
# scale factor to get circular trajectories
factor = jnp.sqrt(jnp.sin(jnp.pi / 3)/(2 * jnp.cos(jnp.pi / 6) **2))
# velocity that yields a circular orbit
v1 = jnp.flip(p1, axis=-1) * factor / r[:, None]**1.5
v2 = jnp.matmul(v1, rot.T)
v3 = jnp.matmul(v2, rot.T)
v = jnp.concatenate([v1, v2, v3], axis=-1)
return phase_space.PhaseSpace(position=p, momentum=v)
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/hamiltonian_systems/n_body.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module with various utilities not central to the code."""
from typing import Dict, Optional, Tuple, Union
import jax
from jax import lax
import jax.numpy as jnp
import jax.random as jnr
import numpy as np
FloatArray = Union[float, jnp.ndarray]
Params = Dict[str, jnp.ndarray]
class BoxRegion:
"""A class for bounds, especially used for sampling."""
def __init__(self, minimum: FloatArray, maximum: FloatArray):
minimum = jnp.asarray(minimum)
maximum = jnp.asarray(maximum)
if minimum.shape != maximum.shape:
raise ValueError(f"The passed values for minimum and maximum should have "
f"the same shape, but had shapes: {minimum.shape} "
f"and {maximum.shape}.")
self._minimum = minimum
self._maximum = maximum
@property
def min(self) -> jnp.ndarray:
return self._minimum
@property
def max(self) -> jnp.ndarray:
return self._maximum
@property
def size(self) -> jnp.ndarray:
return self.max - self.min
@property
def dims(self) -> int:
if self.min.ndim != 0:
return self.min.shape[-1]
return 0
def convert_to_unit_interval(self, value: jnp.ndarray) -> jnp.ndarray:
return (value - self.min) / self.size
def convert_from_unit_interval(self, value: jnp.ndarray) -> jnp.ndarray:
return value * self.size + self.min
def __str__(self) -> str:
return f"{type(self).__name__}(min={self.min}, max={self.max})"
def __repr__(self) -> str:
return self.__str__()
def expand_to_rank_right(x: jnp.ndarray, rank: int) -> jnp.ndarray:
if x.ndim == rank:
return x
assert x.ndim < rank
new_shape = x.shape + (1,) * (rank - x.ndim)
return x.reshape(new_shape)
def expand_to_rank_left(x: jnp.ndarray, rank: int) -> jnp.ndarray:
if x.ndim == rank:
return x
assert x.ndim < rank
new_shape = (1,) * (rank - x.ndim) + x.shape
return x.reshape(new_shape)
def vecmul(matrix: jnp.ndarray, vector: jnp.ndarray) -> jnp.ndarray:
return jnp.matmul(matrix, vector[..., None])[..., 0]
def dt_to_t_eval(t0: FloatArray, dt: FloatArray, num_steps: int) -> jnp.ndarray:
if (isinstance(t0, (float, np.ndarray)) and
isinstance(dt, (float, np.ndarray))):
dt = np.asarray(dt)[None]
shape = [num_steps] + [1] * (dt.ndim - 1)
return t0 + dt * np.arange(1, num_steps + 1).reshape(shape)
else:
return t0 + dt * jnp.arange(1, num_steps + 1)
def t_eval_to_dt(t0: FloatArray, t_eval: FloatArray) -> jnp.ndarray:
t = jnp.ones_like(t_eval[:1]) * t0
t = jnp.concatenate([t, t_eval], axis=0)
return t[1:] - t[:-1]
def simple_loop(
f,
x0: jnp.ndarray,
t_args: Optional[jnp.ndarray] = None,
num_steps: Optional[int] = None,
use_scan: bool = True
) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Runs a simple loop that outputs the evolved variable at every time step."""
if t_args is None and num_steps is None:
raise ValueError("Exactly one of `t_args` and `num_steps` should be "
"provided.")
if t_args is not None and num_steps is not None:
raise ValueError("Exactly one of `t_args` and `num_steps` should be "
"provided.")
def step(x_t, t_arg):
x_next = f(x_t) if t_arg is None else f(x_t, t_arg)
return x_next, x_next
if use_scan:
return lax.scan(step, init=x0, xs=t_args, length=num_steps)[1]
y = []
x = x0
num_steps = t_args.shape[0] if t_args is not None else num_steps
t_args = [None] * num_steps if t_args is None else t_args
for i in range(num_steps):
x, _ = step(x, t_args[i])
y.append(x)
return jax.tree_multimap(lambda *args: jnp.stack(args, axis=0), *y)
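# A minimal sketch (with a hypothetical update function) of how `simple_loop`
# unrolls an iteration and stacks every intermediate state:
#
#   f = lambda x: 0.5 * x
#   ys = simple_loop(f, x0=jnp.ones([2]), num_steps=3)
#   # ys has shape [3, 2] and holds x after 1, 2 and 3 applications of f.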
def hsv2rgb(array: jnp.ndarray) -> jnp.ndarray:
"""Converts an HSV float array image to RGB."""
hi = jnp.floor(array[..., 0] * 6)
f = array[..., 0] * 6 - hi
p = array[..., 2] * (1 - array[..., 1])
q = array[..., 2] * (1 - f * array[..., 1])
t = array[..., 2] * (1 - (1 - f) * array[..., 1])
v = array[..., 2]
hi = jnp.stack([hi, hi, hi], axis=-1).astype(jnp.uint8) % 6
hi_is_0 = hi == 0
hi_is_1 = hi == 1
hi_is_2 = hi == 2
hi_is_3 = hi == 3
hi_is_4 = hi == 4
hi_is_5 = hi == 5
out = (hi_is_0 * jnp.stack((v, t, p), axis=-1) +
hi_is_1 * jnp.stack((q, v, p), axis=-1) +
hi_is_2 * jnp.stack((p, v, t), axis=-1) +
hi_is_3 * jnp.stack((p, q, v), axis=-1) +
hi_is_4 * jnp.stack((t, p, v), axis=-1) +
hi_is_5 * jnp.stack((v, p, q), axis=-1))
return out
def render_particles_trajectory(
particles: jnp.ndarray,
particles_radius: FloatArray,
color_indices: FloatArray,
canvas_limits: BoxRegion,
resolution: int,
num_colors: int,
background_color: Tuple[float, float, float] = (0.321, 0.349, 0.368),
temperature: FloatArray = 80.0):
"""Renders n particles in different colors for a full trajectory.
NB: The default background color is not black as we have experienced issues
when training models with black background.
Args:
particles: Array of size (t, n, 2)
The last 2 dimensions define the x, y coordinates of each particle.
particles_radius: Array of size (n,) or a single value.
Defines the radius of each particle.
color_indices: Array of size (n,) or a single value.
Defines the color of each particle.
    canvas_limits: BoxRegion
      Defines the canvas bounds over the x and y coordinates.
resolution: int
The resolution of the produced images.
num_colors: int
The number of possible colors to use.
background_color: List or Array of size (3)
      The RGB color for the background. Defaults to a dark gray (see the note
      above).
temperature: float
      The temperature of the sigmoid applied to each pixel's distance from the
      center of a particle when rendering it.
Returns:
An array of size (t, resolution, resolution, 3) with the produced images.
"""
particles = jnp.asarray(particles)
assert particles.ndim == 3
assert particles.shape[-1] == 2
t, n = particles.shape[:2]
particles_radius = jnp.asarray(particles_radius)
if particles_radius.ndim == 0:
particles_radius = jnp.full([n], particles_radius)
assert particles_radius.shape == (n,)
color_indices = jnp.asarray(color_indices)
if color_indices.ndim == 0:
color_indices = jnp.full([n], color_indices)
assert color_indices.shape == (n,), f"Colors shape: {color_indices.shape}"
background_color = jnp.asarray(background_color)
assert background_color.shape == (3,)
particles = canvas_limits.convert_to_unit_interval(particles)
canvas_size = canvas_limits.max - canvas_limits.min
canvas_size = canvas_size[0] if canvas_size.ndim == 1 else canvas_size
particles_radius = particles_radius / canvas_size
images = jnp.ones([t, resolution, resolution, 3]) * background_color
hues = jnp.linspace(0, 1, num=num_colors, endpoint=False)
colors = hues[color_indices][None, :, None, None]
s_channel = jnp.ones((t, n, resolution, resolution))
v_channel = jnp.ones((t, n, resolution, resolution))
h_channel = jnp.ones((t, n, resolution, resolution)) * colors
hsv_imgs = jnp.stack((h_channel, s_channel, v_channel), axis=-1)
rgb_imgs = hsv2rgb(hsv_imgs)
images = [img[:, 0] for img in jnp.split(rgb_imgs, n, axis=1)] + [images]
grid = jnp.linspace(0.0, 1.0, resolution)
dx, dy = jnp.meshgrid(grid, grid)
dx, dy = dx[None, None], dy[None, None]
x, y = particles[..., 0][..., None, None], particles[..., 1][..., None, None]
d = jnp.sqrt((x - dx) ** 2 + (y - dy) ** 2)
particles_radius = particles_radius[..., None, None]
mask = 1.0 / (1.0 + jnp.exp((d - particles_radius) * temperature))
masks = ([m[:, 0, ..., None] for m in jnp.split(mask, n, axis=1)] +
[jnp.ones_like(images[0])])
final_image = jnp.zeros([t, resolution, resolution, 3])
c = jnp.ones_like(images[0])
for img, m in zip(images, masks):
final_image = final_image + c * m * img
c = c * (1 - m)
return final_image
def uniform_annulus(
key: jnp.ndarray,
num_samples: int,
dim_samples: int,
radius_range: BoxRegion,
uniform: bool
) -> jnp.ndarray:
"""Samples points uniformly in the annulus defined by radius range."""
key1, key2 = jnr.split(key)
direction = jnr.normal(key1, [num_samples, dim_samples])
norms = jnp.linalg.norm(direction, axis=-1, keepdims=True)
direction = direction / norms
# Sample a radius uniformly between [min_radius, max_radius]
r = jnr.uniform(key2, [num_samples])
if uniform:
radius_range = BoxRegion(radius_range.min ** 2, radius_range.max ** 2)
r = jnp.sqrt(radius_range.convert_from_unit_interval(r))
else:
r = radius_range.convert_from_unit_interval(r)
return direction * r[:, None]
multi_shuffle = jax.vmap(lambda x, key, k: jnr.permutation(key, x)[:k],
in_axes=(0, 0, None), out_axes=0)
def random_int_k_from_n(
rng: jnp.ndarray,
num_samples: int,
n: int,
k: int
) -> jnp.ndarray:
"""Samples randomly k integers from 1 to n."""
if k > n:
raise ValueError(f"k should be less than or equal to n, but got k={k} and "
f"n={n}.")
x = jnp.repeat(jnp.arange(n).reshape([1, n]), num_samples, axis=0)
return multi_shuffle(x, jnr.split(rng, num_samples), k)
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/hamiltonian_systems/utils.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ideal mass spring."""
import functools
from typing import Any, Optional, Tuple
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import hamiltonian
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import utils
import jax
import jax.numpy as jnp
class IdealMassSpring(hamiltonian.TimeIndependentHamiltonianSystem):
"""An idealized mass-spring system (also known as a harmonica oscillator).
The system is represented in 2 dimensions, but the spring moves only in the
vertical orientation.
Parameters:
k_range - possible range of the spring's force coefficient
m_range - possible range of the particle mass
The Hamiltonian is:
k * q^2 / 2.0 + p^2 / (2 * m)
Initial state parameters:
radius_range - The initial state is sampled from a disk in phase space with
radius in this range.
    uniform_annulus - Whether to sample uniformly over the annulus area or
      uniformly over the radius.
randomize_x - Whether to randomize the horizontal position of the particle
when rendering.
"""
def __init__(
self,
k_range: utils.BoxRegion,
m_range: utils.BoxRegion,
radius_range: utils.BoxRegion,
uniform_annulus: bool = True,
randomize_x: bool = True,
**kwargs):
super().__init__(system_dims=1, **kwargs)
self.k_range = k_range
self.m_range = m_range
self.radius_range = radius_range
self.uniform_annulus = uniform_annulus
self.randomize_x = randomize_x
render = functools.partial(utils.render_particles_trajectory,
canvas_limits=self.full_canvas_bounds(),
resolution=self.resolution,
num_colors=self.num_colors)
self._batch_render = jax.vmap(render)
def _hamiltonian(
self,
y: phase_space.PhaseSpace,
params: utils.Params,
**kwargs: Any
) -> jnp.ndarray:
assert len(params) == 2
k = params["k"]
m = params["m"]
potential = k * y.q[..., 0] ** 2 / 2
kinetic = y.p[..., 0] ** 2 / (2 * m)
return potential + kinetic
def sample_y(
self,
num_samples: int,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> phase_space.PhaseSpace:
assert len(params) == 2
k = params["k"]
m = params["m"]
state = utils.uniform_annulus(
rng_key, num_samples, 2, self.radius_range, self.uniform_annulus)
q = state[..., :1]
p = state[..., 1:] * jnp.sqrt(k * m)
return phase_space.PhaseSpace(position=q, momentum=p)
def sample_params(
self,
num_samples: int,
rng_key: jnp.ndarray,
**kwargs: Any
) -> utils.Params:
key1, key2 = jax.random.split(rng_key)
k = jax.random.uniform(key1, [num_samples], minval=self.k_range.min,
maxval=self.k_range.max)
m = jax.random.uniform(key2, [num_samples], minval=self.m_range.min,
maxval=self.m_range.max)
return dict(k=k, m=m)
def simulate_analytically(
self,
y0: phase_space.PhaseSpace,
t0: utils.FloatArray,
t_eval: jnp.ndarray,
params: utils.Params,
**kwargs: Any
) -> Optional[phase_space.PhaseSpace]:
if self.friction != 0.0:
return None
assert len(params) == 2
k = params["k"]
m = params["m"]
t = t_eval - t0
w = jnp.sqrt(k / m).astype(self.dtype)
a = jnp.sqrt(y0.q[..., 0] ** 2 + y0.p[..., 0] ** 2 / (k * m))
b = jnp.arctan2(- y0.p[..., 0], y0.q[..., 0] * m * w)
w, a, b, m = w[..., None], a[..., None], b[..., None], m[..., None]
t = utils.expand_to_rank_right(t, y0.q.ndim + 1)
q = a * jnp.cos(w * t + b)
p = - a * m * w * jnp.sin(w * t + b)
return phase_space.PhaseSpace(position=q, momentum=p)
def canvas_bounds(self) -> utils.BoxRegion:
max_x = self.radius_range.max
max_r = jnp.sqrt(self.m_range.max / jnp.pi)
return utils.BoxRegion(- max_x - max_r, max_x + max_r)
def canvas_position(
self,
position: jnp.ndarray,
params: utils.Params
) -> jnp.ndarray:
return jnp.stack([jnp.zeros_like(position), position], axis=-1)
def render_trajectories(
self,
position: jnp.ndarray,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> Tuple[jnp.ndarray, utils.Params]:
n, _, d = position.shape
assert d == self.system_dims
assert len(params) == 2
key1, key2 = jax.random.split(rng_key)
m = utils.expand_to_rank_right(params["m"], 2)
particles = self.canvas_position(position, params)
if self.randomize_x:
x_offset = jax.random.uniform(key1, shape=[n])
y_offset = jnp.zeros_like(x_offset)
offset = jnp.stack([x_offset, y_offset], axis=-1)
else:
offset = jnp.zeros([n, d])
if self.randomize_canvas_location:
offset_ = jax.random.uniform(key2, shape=[n, d])
offset_ = self.random_offset_bounds().convert_from_unit_interval(offset_)
offset = offset + offset_
particles = particles + offset[:, None, None, :]
particles_radius = jnp.sqrt(m / jnp.pi)
if self.num_colors == 1:
color_index = jnp.zeros([n, 1]).astype("int64")
else:
color_index = jax.random.randint(
key=rng_key, shape=[n, 1], minval=0, maxval=self.num_colors)
images = self._batch_render(particles, particles_radius, color_index)
return images, dict(offset=offset, color_index=color_index)
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/hamiltonian_systems/ideal_mass_spring.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ideal double pendulum."""
import functools
from typing import Any, Optional, Tuple
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import hamiltonian
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import utils
import jax
import jax.numpy as jnp
class IdealDoublePendulum(hamiltonian.TimeIndependentHamiltonianSystem):
"""An idealized double pendulum system.
Parameters:
m_range - possible range of the particle mass
g_range - possible range of the gravitational force
l_range - possible range of the length of the pendulum
The Hamiltonian is:
H = [m_2 * l_2^2 * p_1^2 + (m_1 + m_2) * l_1^2 * p_2^2 - 2 * m_2 * l_1 *
l_2 * p_1 * p_2 * cos(q_1 - q_2)] /
          [2 * m_2 * l_1^2 * l_2^2 * (m_1 + m_2 * sin(q_1 - q_2)^2)]
- (m_1 + m_2) * g * l_1 * cos(q_1) - m_2 * g * l_2 * cos(q_2)
See https://iopscience.iop.org/article/10.1088/1742-6596/739/1/012066/meta
Initial state parameters:
radius_range - The initial state is sampled from a disk in phase space with
radius in this range.
    uniform_annulus - Whether to sample uniformly over the annulus area or
      uniformly over the radius.
    randomize_canvas_location - Whether to randomize the location of the
      particles on the canvas when rendering.
"""
def __init__(
self,
m_range: utils.BoxRegion,
g_range: utils.BoxRegion,
l_range: utils.BoxRegion,
radius_range: utils.BoxRegion,
uniform_annulus: bool = True,
**kwargs):
super().__init__(system_dims=2, **kwargs)
self.m_range = m_range
self.g_range = g_range
self.l_range = l_range
self.radius_range = radius_range
self.uniform_annulus = uniform_annulus
render = functools.partial(
utils.render_particles_trajectory,
canvas_limits=self.full_canvas_bounds(),
resolution=self.resolution,
num_colors=self.num_colors)
self._batch_render = jax.vmap(render)
def _hamiltonian(
self,
y: phase_space.PhaseSpace,
params: utils.Params,
**kwargs: Any
) -> jnp.ndarray:
assert len(params) == 5
m_1 = params["m_1"]
l_1 = params["l_1"]
m_2 = params["m_2"]
l_2 = params["l_2"]
g = params["g"]
q_1, q_2 = y.q[..., 0], y.q[..., 1]
p_1, p_2 = y.p[..., 0], y.p[..., 1]
a_1 = m_2 * l_2 ** 2 * p_1 ** 2
a_2 = (m_1 + m_2) * l_1 ** 2 * p_2 ** 2
a_3 = 2 * m_2 * l_1 * l_2 * p_1 * p_2 * jnp.cos(q_1 - q_2)
b_1 = 2 * m_2 * l_1 ** 2 * l_2 ** 2
b_2 = (m_1 + m_2 * jnp.sin(q_1 - q_2) ** 2)
c_1 = (m_1 + m_2) * g * l_1 * jnp.cos(q_1)
c_2 = m_2 * g * l_2 * jnp.cos(q_2)
return (a_1 + a_2 - a_3) / (b_1 * b_2) - c_1 - c_2
def sample_y(
self,
num_samples: int,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> phase_space.PhaseSpace:
key1, key2 = jax.random.split(rng_key)
state_1 = utils.uniform_annulus(
key1, num_samples, 2, self.radius_range, self.uniform_annulus)
state_2 = utils.uniform_annulus(
key2, num_samples, 2, self.radius_range, self.uniform_annulus)
state = jnp.stack([state_1[..., 0], state_2[..., 0],
state_1[..., 1], state_2[..., 1]], axis=-1)
return phase_space.PhaseSpace.from_state(state.astype(self.dtype))
def sample_params(
self,
num_samples: int,
rng_key: jnp.ndarray,
**kwargs: Any
) -> utils.Params:
keys = jax.random.split(rng_key, 5)
m_1 = jax.random.uniform(keys[0], [num_samples], minval=self.m_range.min,
maxval=self.m_range.max)
m_2 = jax.random.uniform(keys[1], [num_samples], minval=self.m_range.min,
maxval=self.m_range.max)
l_1 = jax.random.uniform(keys[2], [num_samples], minval=self.l_range.min,
maxval=self.l_range.max)
l_2 = jax.random.uniform(keys[3], [num_samples], minval=self.l_range.min,
maxval=self.l_range.max)
g = jax.random.uniform(keys[4], [num_samples], minval=self.g_range.min,
maxval=self.g_range.max)
return dict(m_1=m_1, m_2=m_2, l_1=l_1, l_2=l_2, g=g)
def simulate_analytically(
self,
y0: phase_space.PhaseSpace,
t0: utils.FloatArray,
t_eval: jnp.ndarray,
params: utils.Params,
**kwargs: Any
) -> Optional[phase_space.PhaseSpace]:
return None
def canvas_bounds(self) -> utils.BoxRegion:
max_d = 2 * self.l_range.max + jnp.sqrt(self.m_range.max / jnp.pi)
return utils.BoxRegion(-max_d, max_d)
def canvas_position(
self,
position: jnp.ndarray,
params: utils.Params
) -> jnp.ndarray:
l_1 = utils.expand_to_rank_right(params["l_1"], 2)
l_2 = utils.expand_to_rank_right(params["l_2"], 2)
y_1 = jnp.sin(position[..., 0] - jnp.pi / 2.0) * l_1
x_1 = jnp.cos(position[..., 0] - jnp.pi / 2.0) * l_1
position_1 = jnp.stack([x_1, y_1], axis=-1)
y_2 = jnp.sin(position[..., 1] - jnp.pi / 2.0) * l_2
x_2 = jnp.cos(position[..., 1] - jnp.pi / 2.0) * l_2
position_2 = jnp.stack([x_2, y_2], axis=-1)
return jnp.stack([position_1, position_2], axis=-2)
def render_trajectories(
self,
position: jnp.ndarray,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> Tuple[jnp.ndarray, utils.Params]:
n, _, d = position.shape
assert d == self.system_dims
assert len(params) == 5
key1, key2 = jax.random.split(rng_key, 2)
m_1 = params["m_1"]
m_2 = params["m_2"]
position = self.canvas_position(position, params)
position_1, position_2 = position[..., 0, :], position[..., 1, :]
if self.randomize_canvas_location:
offset = jax.random.uniform(key1, shape=[n, 2])
offset = self.random_offset_bounds().convert_from_unit_interval(offset)
else:
offset = jnp.zeros([n, 2])
position_1 = position_1 + offset[:, None, :]
position_2 = position_1 + position_2
particles = jnp.stack([position_1, position_2], axis=-2)
radius_1 = jnp.sqrt(m_1 / jnp.pi)
radius_2 = jnp.sqrt(m_2 / jnp.pi)
particles_radius = jnp.stack([radius_1, radius_2], axis=-1)
if self.num_colors == 1:
color_index = jnp.zeros([n, 2]).astype("int64")
else:
color_index = utils.random_int_k_from_n(
key2,
num_samples=n,
n=self.num_colors,
k=2
)
images = self._batch_render(particles, particles_radius, color_index)
return images, dict(offset=offset, color_index=color_index)
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/hamiltonian_systems/ideal_double_pendulum.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ideal pendulum."""
import functools
from typing import Any, Optional, Tuple
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import hamiltonian
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import utils
import jax
import jax.numpy as jnp
class IdealPendulum(hamiltonian.TimeIndependentHamiltonianSystem):
"""An idealized pendulum system.
Parameters:
m_range - possible range of the particle mass
g_range - possible range of the gravitational force
l_range - possible range of the length of the pendulum
The Hamiltonian is:
m * l * g * (1 - cos(q)) + p^2 / (2 * m * l^2)
Initial state parameters:
radius_range - The initial state is sampled from a disk in phase space with
radius in this range.
    uniform_annulus - Whether to sample uniformly on the disk or uniformly in
      the radius.
    randomize_canvas_location - Whether to randomize the position of the
      particle when rendering.
"""
def __init__(
self,
m_range: utils.BoxRegion,
g_range: utils.BoxRegion,
l_range: utils.BoxRegion,
radius_range: utils.BoxRegion,
uniform_annulus: bool = True,
**kwargs):
super().__init__(system_dims=1, **kwargs)
self.m_range = m_range
self.g_range = g_range
self.l_range = l_range
self.radius_range = radius_range
self.uniform_annulus = uniform_annulus
render = functools.partial(utils.render_particles_trajectory,
canvas_limits=self.full_canvas_bounds(),
resolution=self.resolution,
num_colors=self.num_colors)
self._batch_render = jax.vmap(render)
def _hamiltonian(
self,
y: phase_space.PhaseSpace,
params: utils.Params,
**kwargs: Any
) -> jnp.ndarray:
assert len(params) == 3
m = params["m"]
l = params["l"]
g = params["g"]
potential = m * g * l * (1 - jnp.cos(y.q[..., 0]))
kinetic = y.p[..., 0] ** 2 / (2 * m * l ** 2)
return potential + kinetic
def sample_y(
self,
num_samples: int,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> phase_space.PhaseSpace:
state = utils.uniform_annulus(
rng_key, num_samples, 2, self.radius_range, self.uniform_annulus)
return phase_space.PhaseSpace.from_state(state.astype(self.dtype))
def sample_params(
self,
num_samples: int,
rng_key: jnp.ndarray,
**kwargs: Any
) -> utils.Params:
key1, key2, key3 = jax.random.split(rng_key, 3)
m = jax.random.uniform(key1, [num_samples], minval=self.m_range.min,
maxval=self.m_range.max)
l = jax.random.uniform(key2, [num_samples], minval=self.l_range.min,
maxval=self.l_range.max)
g = jax.random.uniform(key3, [num_samples], minval=self.g_range.min,
maxval=self.g_range.max)
return dict(m=m, l=l, g=g)
def simulate_analytically(
self,
y0: phase_space.PhaseSpace,
t0: utils.FloatArray,
t_eval: jnp.ndarray,
params: utils.Params,
**kwargs: Any
) -> Optional[phase_space.PhaseSpace]:
return None
def canvas_bounds(self) -> utils.BoxRegion:
max_d = self.l_range.max + jnp.sqrt(self.m_range.max / jnp.pi)
return utils.BoxRegion(-max_d, max_d)
def canvas_position(
self,
position: jnp.ndarray,
params: utils.Params
) -> jnp.ndarray:
l = utils.expand_to_rank_right(params["l"], 2)
y = jnp.sin(position[..., 0] - jnp.pi / 2.0) * l
    x = jnp.cos(position[..., 0] - jnp.pi / 2.0) * l
return jnp.stack([x, y], axis=-1)
def render_trajectories(
self,
position: jnp.ndarray,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> Tuple[jnp.ndarray, utils.Params]:
n, _, d = position.shape
assert d == self.system_dims
assert len(params) == 3
m = utils.expand_to_rank_right(params["m"], 2)
key1, key2 = jax.random.split(rng_key, 2)
particles = self.canvas_position(position, params)
if self.randomize_canvas_location:
offset = jax.random.uniform(key1, shape=[n, 2])
offset = self.random_offset_bounds().convert_from_unit_interval(offset)
else:
offset = jnp.zeros(shape=[n, 2])
particles = particles + offset[:, None, :]
particles = particles[..., None, :]
particles_radius = jnp.sqrt(m / jnp.pi)
if self.num_colors == 1:
color_index = jnp.zeros([n, 1]).astype("int64")
else:
color_index = jax.random.randint(
key=key2, shape=[n, 1], minval=0, maxval=self.num_colors)
images = self._batch_render(particles, particles_radius, color_index)
return images, dict(offset=offset, color_index=color_index)
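# Illustrative usage sketch: the parameter ranges below are arbitrary example
# values, not ones prescribed by the suite. It builds an `IdealPendulum` and
# samples a few short rendered trajectories via the inherited
# `generate_and_render_dt` method.
def _example_generate_pendulum_data():
  system = IdealPendulum(
      m_range=utils.BoxRegion(0.5, 1.5),
      g_range=utils.BoxRegion(3.0, 15.0),
      l_range=utils.BoxRegion(0.5, 1.0),
      radius_range=utils.BoxRegion(1.3, 2.3),
  )
  data = system.generate_and_render_dt(
      num_trajectories=4,
      rng_key=jax.random.PRNGKey(0),
      t0=0.0,
      dt=0.05,
      num_steps=20,
  )
  # `data` holds "x", "dx_dt", "image" and "other" entries; "x" stacks the
  # angle and momentum over time for each trajectory.
  return data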
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/hamiltonian_systems/ideal_pendulum.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module with all Hamiltonian systems that have analytic solutions."""
from typing import Any, Optional, Tuple
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import hamiltonian
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import utils
import jax.numpy as jnp
import jax.random as jnr
class PotentialFreeSystem(hamiltonian.TimeIndependentHamiltonianSystem):
"""A system where the potential energy is 0 and the kinetic is quadratic.
Parameters:
matrix - a positive semi-definite matrix used for the kinetic quadratic.
The Hamiltonian is:
p^T M p / 2
  Initial state parameters:
    init_vector_range - the box region from which the initial state vector is
      sampled uniformly.
"""
def __init__(
self,
system_dims: int,
eigen_values_range: utils.BoxRegion,
init_vector_range: utils.BoxRegion,
**kwargs):
super().__init__(system_dims=system_dims, **kwargs)
if eigen_values_range.dims != 0 and eigen_values_range.dims != system_dims:
raise ValueError(f"The eigen_values_range must be of the same dimensions "
f"as the system dimensions, but is "
f"{eigen_values_range.dims}.")
if init_vector_range.dims != 0 and init_vector_range.dims != system_dims:
raise ValueError(f"The init_vector_range must be of the same dimensions "
f"as the system dimensions, but is "
f"{init_vector_range.dims}.")
self.eigen_values_range = eigen_values_range
self.init_vector_range = init_vector_range
def _hamiltonian(
self,
y: phase_space.PhaseSpace,
params: utils.Params,
**kwargs: Any
) -> jnp.ndarray:
assert len(params) == 1
matrix = params["matrix"]
potential = 0
kinetic = jnp.sum(jnp.matmul(y.p, matrix) * y.p, axis=-1) / 2
return potential + kinetic
def sample_y(
self,
num_samples: int,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> phase_space.PhaseSpace:
# Sample random state
y = jnr.uniform(rng_key, [num_samples, 2 * self.system_dims],
dtype=self.dtype)
y = self.init_vector_range.convert_from_unit_interval(y)
return phase_space.PhaseSpace.from_state(y)
def sample_params(
self,
num_samples: int,
rng_key: jnp.ndarray,
**kwargs: Any
) -> utils.Params:
key1, key2 = jnr.split(rng_key)
matrix_shape = [num_samples, self.system_dims, self.system_dims]
gaussian = jnr.normal(key1, matrix_shape)
q, _ = jnp.linalg.qr(gaussian)
eigs = jnr.uniform(key2, [num_samples, self.system_dims])
eigs = self.eigen_values_range.convert_from_unit_interval(eigs)
q_eigs = q * eigs[..., None]
matrix = jnp.matmul(q_eigs, jnp.swapaxes(q_eigs, -2, -1))
return dict(matrix=matrix)
def simulate_analytically(
self,
y0: phase_space.PhaseSpace,
t0: utils.FloatArray,
t_eval: jnp.ndarray,
params: utils.Params,
**kwargs: Any
) -> Optional[phase_space.PhaseSpace]:
if self.friction != 0.0:
return None
assert len(params) == 1
matrix = params["matrix"]
t = utils.expand_to_rank_right(t_eval - t0, y0.q.ndim + 1)
q = y0.q[None] + utils.vecmul(matrix, y0.p)[None] * t
p = y0.p[None] * jnp.ones_like(t)
return phase_space.PhaseSpace(position=q, momentum=p)
def canvas_bounds(self) -> utils.BoxRegion:
raise NotImplementedError()
def canvas_position(
self,
position: jnp.ndarray,
params: utils.Params
) -> jnp.ndarray:
raise NotImplementedError()
def render_trajectories(
self,
position: jnp.ndarray,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> Tuple[jnp.ndarray, utils.Params]:
raise NotImplementedError()
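# Illustrative sketch (arbitrary example ranges, not values prescribed by the
# suite): with zero potential the system above reduces to free motion, so
# `simulate_analytically` returns q(t) = q0 + M p0 (t - t0) with constant
# momentum.
def _example_potential_free_analytic_solution():
  system = PotentialFreeSystem(
      system_dims=2,
      eigen_values_range=utils.BoxRegion(0.5, 1.0),
      init_vector_range=utils.BoxRegion(-1.0, 1.0),
  )
  params = system.sample_params(num_samples=1, rng_key=jnr.PRNGKey(0))
  y0 = system.sample_y(num_samples=1, params=params, rng_key=jnr.PRNGKey(1))
  y = system.simulate_analytically(y0, 0.0, jnp.array([0.5, 1.0]), params)
  # The momentum stays constant while the position drifts linearly in time.
  return y.q, y.p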
class KineticFreeSystem(PotentialFreeSystem):
"""A system where the kinetic energy is 0 and the potential is quadratic.
Parameters:
matrix - a positive semi-definite matrix used for the potential quadratic.
The Hamiltonian is:
q^T M q / 2
  Initial state parameters:
    init_vector_range - the box region from which the initial state vector is
      sampled uniformly.
"""
def _hamiltonian(
self,
y: phase_space.PhaseSpace,
params: utils.Params,
**kwargs: Any
) -> jnp.ndarray:
assert len(params) == 1
matrix = params["matrix"]
potential = jnp.sum(jnp.matmul(y.q, matrix) * y.q, axis=-1) / 2
kinetic = 0
return potential + kinetic
def simulate_analytically(
self,
y0: phase_space.PhaseSpace,
t0: utils.FloatArray,
t_eval: jnp.ndarray,
params: utils.Params,
**kwargs: Any
) -> Optional[phase_space.PhaseSpace]:
if self.friction != 0.0:
return None
assert len(params) == 1
matrix = params["matrix"]
t = utils.expand_to_rank_right(t_eval - t0, y0.q.ndim + 1)
q = y0.q[None] * jnp.ones_like(t)
p = y0.p[None] - utils.vecmul(matrix, y0.q)[None] * t
return phase_space.PhaseSpace(position=q, momentum=p)
def canvas_bounds(self) -> utils.BoxRegion:
raise NotImplementedError()
def canvas_position(
self,
position: jnp.ndarray,
params: utils.Params
) -> jnp.ndarray:
raise NotImplementedError()
def render_trajectories(
self,
position: jnp.ndarray,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> Tuple[jnp.ndarray, utils.Params]:
raise NotImplementedError()
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/hamiltonian_systems/simple_analytic.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module with the abstract class for Hamiltonian systems."""
import abc
from typing import Any, Callable, Mapping, Optional, Tuple, Union
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import utils
import jax
import jax.numpy as jnp
import jax.random as jnr
from scipy import integrate
Integrator = Callable[
[
Union[phase_space.HamiltonianFunction,
phase_space.SymplecticTangentFunction], # dy_dt
Union[jnp.ndarray, phase_space.PhaseSpace], # y0
Union[float, jnp.ndarray], # t0
Union[float, jnp.ndarray], # dt
int, # num_steps
int # steps_per_dt
],
Tuple[jnp.ndarray, phase_space.PhaseSpace]
]
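# Illustrative sketch of the `Integrator` calling convention: a naive
# forward-Euler integrator that assumes `dy_dt` is a tangent function
# returning a `TangentPhaseSpace` and that `dt` is a scalar. The integrators
# actually used by this class live in the integrators.py module and are far
# more accurate; this sketch only shows the expected inputs and outputs.
def _naive_euler_integrator(dy_dt, y0, t0, dt, num_steps, steps_per_dt):
  inner_dt = dt / steps_per_dt
  t, y = t0, y0
  ts, qs, ps = [], [], []
  for _ in range(num_steps):
    for _ in range(steps_per_dt):
      dy = dy_dt(t, y)
      y = phase_space.PhaseSpace(position=y.q + inner_dt * dy.position,
                                 momentum=y.p + inner_dt * dy.momentum)
      t = t + inner_dt
    ts.append(t)
    qs.append(y.q)
    ps.append(y.p)
  trajectory = phase_space.PhaseSpace(position=jnp.stack(qs, axis=0),
                                      momentum=jnp.stack(ps, axis=0))
  return jnp.asarray(ts), trajectory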
class HamiltonianSystem(abc.ABC):
"""General class to represent Hamiltonian Systems and simulate them."""
def __init__(
self,
system_dims: int,
randomize_canvas_location: bool = True,
random_canvas_extra_ratio: float = 0.4,
try_analytic_solution: bool = True,
friction: float = 0.0,
method: Union[str, Integrator] = "scipy",
num_colors: int = 6,
image_resolution: int = 32,
dtype: str = "float32",
steps_per_dt: int = 1,
stiff: bool = False,
extra_ivp_kwargs: Optional[Mapping[str, Union[str, int, float]]] = None):
"""Initializes some global properties.
Args:
system_dims: Dimensionality of the positions (not joint state).
randomize_canvas_location: Whether to add random offset to images.
      random_canvas_extra_ratio: How much extra random offset to use, as a
        ratio of the canvas size.
      try_analytic_solution: If True, first tries to solve the system
        analytically if possible. If False, always integrates systems
        numerically.
friction: This changes the dynamics to non-conservative. The new
dynamics are formulated as follows:
dq/dt = dH/dp
dp/dt = - dH/dq - friction * dH/dp
This implies that the Hamiltonian energy decreases over time:
dH/dt = dH/dq^T dq/dt + dH/dp^T dp/dt = - friction * ||dH/dp||_2^2
method: "scipy" or a callable of type `Integrator`.
num_colors: The number of possible colors to use for rendering.
      image_resolution: The resolution of the generated images.
      dtype: What dtype to use for the generated data.
      steps_per_dt: Number of inner integration steps per single observation
        dt.
stiff: Whether the problem represents a stiff system.
extra_ivp_kwargs: Extra arguments to the scipy solver.
Raises:
ValueError: if `dtype` is not 'float32' or 'float64'.
"""
self._system_dims = system_dims
self._randomize_canvas_location = randomize_canvas_location
self._random_canvas_extra_ratio = random_canvas_extra_ratio
self._try_analytic_solution = try_analytic_solution
self._friction = friction
self._method = method
self._num_colors = num_colors
self._resolution = image_resolution
self._dtype = dtype
self._stiff = stiff
self._steps_per_dt = steps_per_dt
if dtype == "float64":
self._scipy_ivp_kwargs = dict(rtol=1e-12, atol=1e-12)
elif dtype == "float32":
self._scipy_ivp_kwargs = dict(rtol=1e-9, atol=1e-9)
else:
raise ValueError("Currently we only support float64 and float32 dtypes.")
if stiff:
self._scipy_ivp_kwargs["method"] = "Radau"
if extra_ivp_kwargs is not None:
self._scipy_ivp_kwargs.update(extra_ivp_kwargs)
@property
def system_dims(self) -> int:
return self._system_dims
@property
def randomize_canvas_location(self) -> bool:
return self._randomize_canvas_location
@property
def random_canvas_extra_ratio(self) -> float:
return self._random_canvas_extra_ratio
@property
def try_analytic_solution(self) -> bool:
return self._try_analytic_solution
@property
def friction(self) -> float:
return self._friction
@property
def method(self):
return self._method
@property
def num_colors(self) -> int:
return self._num_colors
@property
def resolution(self) -> int:
return self._resolution
@property
def dtype(self) -> str:
return self._dtype
@property
def stiff(self) -> bool:
return self._stiff
@property
def steps_per_dt(self) -> int:
return self._steps_per_dt
@property
def scipy_ivp_kwargs(self) -> Mapping[str, Union[str, int, float]]:
return self._scipy_ivp_kwargs
@abc.abstractmethod
def parametrized_hamiltonian(
self,
t: jnp.ndarray,
y: phase_space.PhaseSpace,
params: utils.Params,
**kwargs: Any
) -> jnp.ndarray:
"""Calculates the Hamiltonian."""
def hamiltonian_from_params(
self,
params: utils.Params,
**kwargs: Any
) -> phase_space.HamiltonianFunction:
def hamiltonian(t: jnp.ndarray, y: phase_space.PhaseSpace) -> jnp.ndarray:
return self.parametrized_hamiltonian(t, y, params, **kwargs)
return hamiltonian
@abc.abstractmethod
def sample_y(
self,
num_samples: int,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> phase_space.PhaseSpace:
"""Samples randomly initial states."""
@abc.abstractmethod
def sample_params(
self,
num_samples: int,
rng_key: jnp.ndarray,
**kwargs: Any
) -> utils.Params:
"""Samples randomly parameters."""
@abc.abstractmethod
def simulate_analytically(
self,
y0: phase_space.PhaseSpace,
t0: utils.FloatArray,
t_eval: jnp.ndarray,
params: utils.Params,
**kwargs: Any
) -> Optional[phase_space.PhaseSpace]:
"""If analytic solution exist returns it, else returns None."""
def simulate_analytically_dt(
self,
y0: phase_space.PhaseSpace,
t0: utils.FloatArray,
dt: utils.FloatArray,
num_steps: int,
params: utils.Params,
**kwargs: Any
) -> Optional[phase_space.PhaseSpace]:
"""Same as `simulate_analytically` but uses `dt` and `num_steps`."""
t_eval = utils.dt_to_t_eval(t0, dt, num_steps)
return self.simulate_analytically(y0, t0, t_eval, params, **kwargs)
@abc.abstractmethod
def canvas_bounds(self) -> utils.BoxRegion:
"""Returns the limits of the canvas for rendering."""
def random_offset_bounds(self) -> utils.BoxRegion:
"""Returns any extra randomized offset that can be given to the canvas."""
extra_size = self.random_canvas_extra_ratio * self.canvas_bounds().size / 2
return utils.BoxRegion(
minimum=-extra_size,
maximum=extra_size
)
def full_canvas_bounds(self) -> utils.BoxRegion:
if self.randomize_canvas_location:
return utils.BoxRegion(
self.canvas_bounds().min + self.random_offset_bounds().min,
self.canvas_bounds().max + self.random_offset_bounds().max,
)
else:
return self.canvas_bounds()
@abc.abstractmethod
def canvas_position(
self,
position: jnp.ndarray,
params: utils.Params
) -> jnp.ndarray:
"""Returns the canvas position given the position vectors and the parameters."""
@abc.abstractmethod
def render_trajectories(
self,
position: jnp.ndarray,
params: utils.Params,
rng_key: jnp.ndarray,
**kwargs: Any
) -> Tuple[jnp.ndarray, utils.Params]:
"""Renders the positions q into an image."""
def simulate_scipy(
self,
y0: phase_space.PhaseSpace,
t0: utils.FloatArray,
t_eval: jnp.ndarray,
params: utils.Params,
ivp_kwargs=None,
**kwargs: Any
) -> phase_space.PhaseSpace:
"""Simulates the system using scipy.integrate.solve_ivp."""
t_span = (t0, float(t_eval[-1]))
y0 = jnp.concatenate([y0.q, y0.p], axis=-1)
y_shape = y0.shape
y0 = y0.reshape([-1])
hamiltonian = self.hamiltonian_from_params(params, **kwargs)
@jax.jit
def fun(t, y):
f = phase_space.poisson_bracket_with_q_and_p(hamiltonian)
dy = f(t, phase_space.PhaseSpace.from_state(y.reshape(y_shape)))
if self.friction != 0.0:
friction_term = phase_space.TangentPhaseSpace(
position=jnp.zeros_like(dy.position),
momentum=-self.friction * dy.position)
dy = dy + friction_term
return dy.single_state.reshape([-1])
kwargs = dict(**self.scipy_ivp_kwargs)
kwargs.update(ivp_kwargs or dict())
solution = integrate.solve_ivp(
fun=fun,
t_span=t_span,
y0=y0,
t_eval=t_eval,
**kwargs)
y_final = solution.y.reshape(y_shape + (t_eval.size,))
return phase_space.PhaseSpace.from_state(jnp.moveaxis(y_final, -1, 0))
def simulate_scipy_dt(
self,
y0: phase_space.PhaseSpace,
t0: utils.FloatArray,
dt: utils.FloatArray,
num_steps: int,
params: utils.Params,
ivp_kwargs=None,
**kwargs: Any
) -> phase_space.PhaseSpace:
"""Same as `simulate_scipy` but uses `dt` and `num_steps`."""
t_eval = utils.dt_to_t_eval(t0, dt, num_steps)
return self.simulate_scipy(y0, t0, t_eval, params, ivp_kwargs, **kwargs)
def simulate_integrator(
self,
y0: phase_space.PhaseSpace,
t0: utils.FloatArray,
t_eval: jnp.ndarray,
params: utils.Params,
method: Union[str, Integrator],
**kwargs: Any
) -> phase_space.PhaseSpace:
"""Simulates the system using an integrator from integrators.py module."""
return self.simulate_integrator_dt(
y0=y0,
t0=t0,
dt=utils.t_eval_to_dt(t0, t_eval),
params=params,
method=method,
**kwargs
)
def simulate_integrator_dt(
self,
y0: phase_space.PhaseSpace,
t0: Union[float, jnp.ndarray],
dt: Union[float, jnp.ndarray],
params: utils.Params,
method: Integrator,
num_steps: Optional[int] = None,
**kwargs: Any
) -> phase_space.PhaseSpace:
"""Same as `simulate_integrator` but uses `dt` and `num_steps`."""
hamiltonian = self.hamiltonian_from_params(params, **kwargs)
if self.friction == 0.0:
return method(
hamiltonian,
y0,
t0,
dt,
num_steps,
self.steps_per_dt,
)[1]
else:
def dy_dt(t: jnp.ndarray, y: phase_space.PhaseSpace):
f = phase_space.poisson_bracket_with_q_and_p(hamiltonian)
dy = f(t, y)
friction_term = phase_space.TangentPhaseSpace(
position=jnp.zeros_like(dy.position),
momentum=-self.friction * dy.position)
return dy + friction_term
return method(
dy_dt,
y0,
t0,
dt,
num_steps,
self.steps_per_dt,
)[1]
def generate_trajectories(
self,
y0: phase_space.PhaseSpace,
t0: utils.FloatArray,
t_eval: jnp.ndarray,
params: utils.Params,
**kwargs: Any
) -> phase_space.PhaseSpace:
"""Generates trajectories of the system in phase space.
Args:
y0: Initial state.
t0: The time instance of the initial state y0.
t_eval: Times at which to return the computed solution.
params: Any parameters of the Hamiltonian.
**kwargs: Any extra things that go into the hamiltonian.
Returns:
A phase_space.PhaseSpace instance of size NxTxD.
"""
return self.generate_trajectories_dt(
y0=y0,
t0=t0,
dt=utils.t_eval_to_dt(t0, t_eval),
params=params,
**kwargs
)
def generate_trajectories_dt(
self,
y0: phase_space.PhaseSpace,
t0: utils.FloatArray,
dt: utils.FloatArray,
params: utils.Params,
num_steps_forward: int,
include_t0: bool = False,
num_steps_backward: int = 0,
**kwargs: Any
) -> phase_space.PhaseSpace:
"""Same as `generate_trajectories` but uses `dt` and `num_steps`."""
if num_steps_forward < 0 or num_steps_backward < 0:
raise ValueError("num_steps_forward and num_steps_backward can not be "
"negative.")
if num_steps_forward == 0 and num_steps_backward == 0:
raise ValueError("You need one of num_steps_forward or "
"num_of_steps_backward to be positive.")
if num_steps_forward > 0 and num_steps_backward > 0 and not include_t0:
raise ValueError("When both num_steps_forward and num_steps_backward are "
"positive include_t0 should be True.")
if self.try_analytic_solution and num_steps_backward == 0:
# Try to use analytical solution
y = self.simulate_analytically_dt(y0, t0, dt, num_steps_forward, params,
**kwargs)
if y is not None:
return y
if self.method == "scipy":
if num_steps_backward > 0:
raise NotImplementedError()
return self.simulate_scipy_dt(y0, t0, dt, num_steps_forward, params,
**kwargs)
yts = []
if num_steps_backward > 0:
yt = self.simulate_integrator_dt(
y0=y0,
t0=t0,
dt=-dt,
params=params,
method=self.method,
num_steps=num_steps_backward,
**kwargs)
yt = jax.tree_map(lambda x: jnp.flip(x, axis=0), yt)
yts.append(yt)
if include_t0:
yts.append(jax.tree_map(lambda x: x[None], y0))
if num_steps_forward > 0:
yt = self.simulate_integrator_dt(
y0=y0,
t0=t0,
dt=dt,
params=params,
method=self.method,
num_steps=num_steps_forward,
**kwargs)
yts.append(yt)
if len(yts) > 1:
return jax.tree_multimap(lambda *a: jnp.concatenate(a, axis=0), *yts)
else:
return yts[0]
def generate_and_render(
self,
num_trajectories: int,
rng_key: jnp.ndarray,
t0: utils.FloatArray,
t_eval: utils.FloatArray,
y0: Optional[phase_space.PhaseSpace] = None,
params: Optional[utils.Params] = None,
within_canvas_bounds: bool = True,
**kwargs: Any
) -> Mapping[str, Any]:
"""Generates trajectories and renders them.
Args:
num_trajectories: The number of trajectories to generate.
rng_key: PRNG key for sampling any random numbers.
t0: The time instance of the initial state y0.
t_eval: Times at which to return the computed solution.
y0: Initial state. If None will be sampled with `self.sample_y`
params: Parameters of the Hamiltonian. If None will be sampled with
`self.sample_params`
      within_canvas_bounds: Re-samples y0 until the trajectories are within
        the canvas bounds.
**kwargs: Any extra things that go into the hamiltonian.
Returns:
A dictionary containing the following elements:
"x": A numpy array representation of the PhaseSpace vector.
"dx_dt": The time derivative of "x".
"image": An image representation of the state.
"other": A dict of other parameters of the system that are not part of
the state.
"""
return self.generate_and_render_dt(
num_trajectories=num_trajectories,
rng_key=rng_key,
t0=t0,
dt=utils.t_eval_to_dt(t0, t_eval),
y0=y0,
params=params,
within_canvas_bounds=within_canvas_bounds,
**kwargs
)
def generate_and_render_dt(
self,
num_trajectories: int,
rng_key: jnp.ndarray,
t0: utils.FloatArray,
dt: utils.FloatArray,
num_steps: Optional[int] = None,
y0: Optional[phase_space.PhaseSpace] = None,
params: Optional[utils.Params] = None,
within_canvas_bounds: bool = True,
**kwargs: Any
) -> Mapping[str, Any]:
"""Same as `generate_and_render` but uses `dt` and `num_steps`."""
if within_canvas_bounds and (y0 is not None or params is not None):
raise ValueError("Within canvas bounds is valid only when y0 and params "
"are None.")
if params is None:
rng_key, key = jnr.split(rng_key)
params = self.sample_params(num_trajectories, rng_key, **kwargs)
if y0 is None:
rng_key, key = jnr.split(rng_key)
y0 = self.sample_y(num_trajectories, params, key, **kwargs)
# Generate the phase-space trajectories
x = self.generate_trajectories_dt(y0, t0, dt, params, num_steps, **kwargs)
# Make batch leading dimension
x = jax.tree_map(lambda x_: jnp.swapaxes(x_, 0, 1), x)
x = jax.tree_multimap(lambda i, j: jnp.concatenate([i[:, None], j], axis=1),
y0, x)
if within_canvas_bounds:
# Check for valid trajectories
valid = []
while len(valid) < num_trajectories:
for idx in range(x.q.shape[0]):
x_idx, params_idx = jax.tree_map(lambda a, i=idx: a[i], (x, params))
position = self.canvas_position(x_idx.q, params_idx)
if (jnp.all(position >= self.canvas_bounds().min) and
jnp.all(position <= self.canvas_bounds().max)):
valid.append((x_idx, params_idx))
if len(valid) == num_trajectories:
break
new_trajectories = num_trajectories - len(valid)
print(f"Generating {new_trajectories} new trajectories.")
rng_key, key = jnr.split(rng_key)
params = self.sample_params(new_trajectories, rng_key, **kwargs)
rng_key, key = jnr.split(rng_key)
y0 = self.sample_y(new_trajectories, params, key, **kwargs)
x = self.generate_trajectories_dt(y0, t0, dt, params, num_steps,
**kwargs)
x = jax.tree_map(lambda x_: jnp.swapaxes(x_, 0, 1), x)
x = jax.tree_multimap(lambda i, j: # pylint:disable=g-long-lambda
jnp.concatenate([i[:, None], j], axis=1), y0, x)
x, params = jax.tree_multimap(lambda *args: jnp.stack(args, axis=0),
*valid)
hamiltonian = self.hamiltonian_from_params(params, **kwargs)
df_dt = jax.vmap(phase_space.poisson_bracket_with_q_and_p(hamiltonian),
in_axes=[0, 1], out_axes=1)
if isinstance(dt, float):
dt = jnp.asarray([dt] * num_steps, dtype=x.q.dtype)
t0 = jnp.asarray(t0).astype(dt.dtype)
t = jnp.cumsum(jnp.concatenate([t0[None], dt], axis=0), axis=0)
dx_dt = df_dt(t, x)
rng_key, key = jnr.split(rng_key)
image, extra = self.render_trajectories(x.q, params, rng_key, **kwargs)
params.update(extra)
return dict(x=x.single_state, dx_dt=dx_dt.single_state,
image=image, other=params)
class TimeIndependentHamiltonianSystem(HamiltonianSystem):
"""A Hamiltonian system where the energy does not depend on time."""
@abc.abstractmethod
def _hamiltonian(
self,
y: phase_space.PhaseSpace,
params: utils.Params,
**kwargs: Any
) -> jnp.ndarray:
"""Computes the time independent Hamiltonian."""
def parametrized_hamiltonian(
self,
t: jnp.ndarray,
y: phase_space.PhaseSpace,
params: utils.Params,
**kwargs: Any
) -> jnp.ndarray:
return self._hamiltonian(y, params, **kwargs)
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/hamiltonian_systems/hamiltonian.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/multiagent_dynamics/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Continuous-time two player zero-sum game dynamics."""
from typing import Any, Mapping
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import utils
import jax.numpy as jnp
import jax.random as jnr
import numpy as np
from numpy.lib.stride_tricks import as_strided
import scipy
from open_spiel.python.egt import dynamics as egt_dynamics
from open_spiel.python.egt import utils as egt_utils
from open_spiel.python.pybind11 import pyspiel
def sample_from_simplex(
num_samples: int,
rng: jnp.ndarray,
dim: int = 3,
vmin: float = 0.
) -> jnp.ndarray:
"""Samples random points from a k-simplex. See D. B. Rubin (1981), p131."""
# This is a jax version of open_spiel.python.egt.utils.sample_from_simplex.
assert vmin >= 0.
p = jnr.uniform(rng, shape=(num_samples, dim - 1))
p = jnp.sort(p, axis=1)
p = jnp.hstack((jnp.zeros((num_samples, 1)), p, jnp.ones((num_samples, 1))))
return (p[:, 1:] - p[:, 0:-1]) * (1 - 2 * vmin) + vmin
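# Quick illustrative check of the sampler above: each sampled point is a
# valid categorical distribution, i.e. non-negative with rows summing to one.
def _example_sample_from_simplex():
  p = sample_from_simplex(4, jnr.PRNGKey(0), dim=3)
  assert p.shape == (4, 3)
  assert jnp.allclose(p.sum(axis=1), 1.0)
  return p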
def get_payoff_tensor(game_name: str) -> jnp.ndarray:
"""Returns the payoff tensor of a game."""
game = pyspiel.extensive_to_tensor_game(pyspiel.load_game(game_name))
assert game.get_type().utility == pyspiel.GameType.Utility.ZERO_SUM
payoff_tensor = egt_utils.game_payoffs_array(game)
return payoff_tensor
def tile_array(a: jnp.ndarray, b0: int, b1: int) -> jnp.ndarray:
r, c = a.shape # number of rows/columns
rs, cs = a.strides # row/column strides
x = as_strided(a, (r, b0, c, b1), (rs, 0, cs, 0)) # view a as larger 4D array
return x.reshape(r * b0, c * b1) # create new 2D array
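# Illustrative example of `tile_array`: every entry of a 2 x 2 payoff-style
# matrix is expanded into a 2 x 3 block, which is how the small joint-policy
# matrices below get upscaled towards a 32 x 32 image.
def _example_tile_array():
  a = np.arange(4.0).reshape(2, 2)
  tiled = tile_array(a, 2, 3)
  assert tiled.shape == (4, 6)
  assert (tiled[:2, :3] == a[0, 0]).all()
  return tiled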
class ZeroSumGame:
"""Generate trajectories from zero-sum game dynamics."""
def __init__(
self,
game_name: str,
dynamics: str = 'replicator',
method: str = 'scipy'):
self.payoff_tensor = get_payoff_tensor(game_name)
assert self.payoff_tensor.shape[0] == 2, 'Only supports two-player games.'
dyn_fun = getattr(egt_dynamics, dynamics)
self.dynamics = egt_dynamics.MultiPopulationDynamics(
self.payoff_tensor, dyn_fun)
self.method = method
self.scipy_ivp_kwargs = dict(rtol=1e-12, atol=1e-12)
def sample_x0(self, num_samples: int, rng_key: jnp.ndarray) -> jnp.ndarray:
"""Samples initial states."""
nrows, ncols = self.payoff_tensor.shape[1:]
key1, key2 = jnr.split(rng_key)
x0_1 = sample_from_simplex(num_samples, key1, dim=nrows)
x0_2 = sample_from_simplex(num_samples, key2, dim=ncols)
x0 = jnp.hstack((x0_1, x0_2))
return x0
def generate_trajectories(
self,
x0: jnp.ndarray,
t0: utils.FloatArray,
t_eval: jnp.ndarray
) -> jnp.ndarray:
"""Generates trajectories of the system in phase space.
Args:
x0: Initial state.
t0: The time instance of the initial state y0.
t_eval: Times at which to return the computed solution.
Returns:
Trajectories of size BxTxD (batch, time, phase-space-dim).
"""
if self.method == 'scipy':
x0_shape = x0.shape
def fun(_, y):
y = y.reshape(x0_shape)
y_next = np.apply_along_axis(self.dynamics, -1, y)
return y_next.reshape([-1])
t_span = (t0, float(t_eval[-1]))
solution = scipy.integrate.solve_ivp(
fun=fun,
t_span=t_span,
y0=x0.reshape([-1]), # Scipy requires flat input.
t_eval=t_eval,
**self.scipy_ivp_kwargs)
x = solution.y.reshape(x0_shape + (t_eval.size,))
x = np.moveaxis(x, -1, 1) # Make time 2nd dimension.
else:
raise ValueError(f'Method={self.method} not supported.')
return x
def render_trajectories(self, x: jnp.ndarray) -> jnp.ndarray:
"""Maps from policies to joint-policy space."""
nrows, ncols = self.payoff_tensor.shape[1:]
x_1 = x[..., :nrows]
x_2 = x[..., nrows:]
x_1 = x_1.repeat(ncols, axis=-1).reshape(x.shape[:-1] + (nrows, ncols,))
x_2 = x_2.repeat(nrows, axis=-1).reshape(x.shape[:-1] + (nrows, ncols,))
x_2 = x_2.swapaxes(-2, -1)
image = x_1 * x_2
# Rescale to 32 x 32 from the original 2x2 or 3x3 data by expanding the
# matrix to the nearest to 32 multiple of 2 or 3, evenly tiling it with the
# original values, and then taking a 32x32 top left slice of it
temp_image = [
tile_array(x, np.ceil(32 / x.shape[0]).astype('int'),
np.ceil(32 / x.shape[1]).astype('int'))[:32, :32]
for x in np.squeeze(image)
]
image = np.stack(temp_image)
image = np.repeat(np.expand_dims(image, -1), 3, axis=-1)
return image[None, ...]
def generate_and_render(
self,
num_trajectories: int,
rng_key: jnp.ndarray,
t0: utils.FloatArray,
t_eval: utils.FloatArray
) -> Mapping[str, Any]:
"""Generates trajectories and renders them.
Args:
num_trajectories: The number of trajectories to generate.
rng_key: PRNG key for sampling any random numbers.
t0: The time instance of the initial state y0.
t_eval: Times at which to return the computed solution.
Returns:
A dictionary containing the following elements:
'x': A numpy array representation of the phase space vector.
'dx_dt': The time derivative of 'x'.
'image': An image representation of the state.
"""
rng_key, key = jnr.split(rng_key)
x0 = self.sample_x0(num_trajectories, key)
x = self.generate_trajectories(x0, t0, t_eval)
x = np.concatenate([x0[:, None], x], axis=1) # Add initial state.
dx_dt = np.apply_along_axis(self.dynamics, -1, x)
image = self.render_trajectories(x)
return dict(x=x, dx_dt=dx_dt, image=image)
def generate_and_render_dt(
self,
num_trajectories: int,
rng_key: jnp.ndarray,
t0: utils.FloatArray,
dt: utils.FloatArray,
num_steps: int
) -> Mapping[str, Any]:
"""Same as `generate_and_render` but uses `dt` and `num_steps`."""
t_eval = utils.dt_to_t_eval(t0, dt, num_steps)
return self.generate_and_render(num_trajectories, rng_key, t0, t_eval)
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/multiagent_dynamics/game_dynamics.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a small molecular dynamics (MD) dataset.
This binary creates a small MD dataset given a text-based trajectory file
generated with the simulation package LAMMPS (https://lammps.sandia.gov/).
The trajectory file can be generated by running LAMMPS with the input script
provided.
We note that this binary is intended as a demonstration only and is therefore
not optimised for memory efficiency or performance.
"""
from typing import Mapping, Tuple
from dm_hamiltonian_dynamics_suite import datasets
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import utils
import numpy as np
Array = np.ndarray
def read_trajectory(filename: str) -> Tuple[Array, float]:
"""Reads the trajectory data from file and returns it as an array.
Each timestep contains a header and the atom data. The header is 9 lines long
and contains the timestep, the number of atoms, the box dimensions and the
list of atom properties. The header is assumed to be structured as in the
example below:
ITEM: TIMESTEP
<<timestep>>
ITEM: NUMBER OF ATOMS
<<num_atoms>>
ITEM: BOX BOUNDS pp pp pp
<<xlo>> <<xhi>>
<<ylo>> <<yhi>>
<<zlo>> <<zhi>>
ITEM: ATOMS id type x y vx vy fx fy
.... <<num_atoms>> lines with properties of <<num_atoms>> atoms....
Args:
filename: name of the input file.
Returns:
A pair where the first element corresponds to an array of shape
[num_timesteps, num_atoms, 6] containing the atom data and the second
element corresponds to the edge length of the simulation box.
"""
with open(filename, 'r') as f:
dat = f.read()
lines = dat.split('\n')
# Extract the number of particles and the edge length of the simulation box.
num_particles = int(lines[3])
box = np.fromstring(lines[5], dtype=np.float32, sep=' ')
box_length = box[1] - box[0]
# Iterate over all timesteps and extract the relevant data columns.
header_size = 9
record_size = header_size + num_particles
num_records = len(lines) // record_size
records = []
for i in range(num_records):
record = lines[header_size + i * record_size:(i + 1) * record_size]
record = np.array([l.split(' ')[:-1] for l in record], dtype=np.float32)
records.append(record)
records = np.array(records)[..., 2:]
return records, box_length
def flatten_record(x: Array) -> Array:
"""Reshapes input from [num_particles, 2*dim] to [2*num_particles*dim]."""
if x.shape[-1] % 2 != 0:
raise ValueError(f'Expected last dimension to be even, got {x.shape[-1]}.')
dim = x.shape[-1] // 2
q = x[..., 0:dim]
p = x[..., dim:]
q_flat = q.reshape(list(q.shape[:-2]) + [-1])
p_flat = p.reshape(list(p.shape[:-2]) + [-1])
x_flat = np.concatenate((q_flat, p_flat), axis=-1)
return x_flat
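# Illustrative example of the layout produced by `flatten_record`: positions
# of all particles come first, followed by all velocities/momenta.
def _example_flatten_record():
  record = np.array([[0., 1., 10., 11.],   # particle 1: x, y, vx, vy
                     [2., 3., 12., 13.]])  # particle 2: x, y, vx, vy
  flat = flatten_record(record)
  assert flat.tolist() == [0., 1., 2., 3., 10., 11., 12., 13.]
  return flat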
def render_images(
x: Array,
box_length: float,
resolution: int = 32,
particle_radius: float = 0.3
) -> Array:
"""Renders a sequence with shape [num_steps, num_particles*dim] as images."""
dim = 2
sequence_length, num_coordinates = x.shape
if num_coordinates % (2 * dim) != 0:
raise ValueError('Expected the number of coordinates to be divisible by 4, '
f'got {num_coordinates}.')
# The 4 coordinates are positions and velocities in 2d.
num_particles = num_coordinates // (2 * dim)
# `x` is formatted as [x_1, y_1,... x_N, y_N, dx_1, dy_1,..., dx_N, dy_N],
# where `N=num_particles`. For the image generation, we only require x and y
# coordinates.
particles = x[..., :num_particles * dim]
particles = particles.reshape((sequence_length, num_particles, dim))
colors = np.arange(num_particles, dtype=np.int32)
box_region = utils.BoxRegion(-box_length / 2., box_length / 2.)
images = utils.render_particles_trajectory(
particles=particles,
particles_radius=particle_radius,
color_indices=colors,
canvas_limits=box_region,
resolution=resolution,
num_colors=num_particles)
return images
def convert_sequence(sequence: Array, box_length: float) -> Mapping[str, Array]:
"""Converts a sequence of timesteps to a data point."""
num_steps, num_particles, num_fields = sequence.shape
# A LAMMPS record should contain positions, velocities and forces.
if num_fields != 6:
raise ValueError('Expected input sequence to be of shape '
f'[num_steps, num_particles, 6], got {sequence.shape}.')
x = np.empty((num_steps, num_particles * 4))
dx_dt = np.empty((num_steps, num_particles * 4))
for step in range(num_steps):
# Assign positions and momenta to `x` and momenta and forces to `dx_dt`.
x[step] = flatten_record(sequence[step, :, (0, 1, 2, 3)])
dx_dt[step] = flatten_record(sequence[step, :, (2, 3, 4, 5)])
image = render_images(x, box_length)
image = np.array(image * 255.0, dtype=np.uint8)
return dict(x=x, dx_dt=dx_dt, image=image)
def write_to_file(
data: Array,
box_length: float,
output_path: str,
split: str,
overwrite: bool,
) -> None:
"""Writes the data to file."""
def generator():
for sequence in data:
yield convert_sequence(sequence, box_length)
datasets.transform_dataset(generator(), output_path, split, overwrite)
def generate_lammps_dataset(
lammps_file: str,
folder: str,
num_steps: int,
num_train: int,
num_test: int,
dt: int,
shuffle: bool,
seed: int,
overwrite: bool,
) -> None:
"""Creates the train and test datasets."""
if num_steps < 1:
raise ValueError(f'Expected `num_steps` to be >= 1, got {num_steps}.')
if dt < 1:
raise ValueError(f'Expected `dt` to be >= 1, got {dt}.')
records, box_length = read_trajectory(lammps_file)
# Consider only every dt-th timestep in the input file.
records = records[::dt]
num_records, num_particles, num_fields = records.shape
if num_records < (num_test + num_train) * num_steps:
raise ValueError(
        f'Trajectory contains only {num_records} records which is insufficient '
f'for the requested train/test split of {num_train}/{num_test} with '
f'sequence length {num_steps}.')
# Reshape and shuffle the data.
num_points = num_records // num_steps
records = records[:num_points * num_steps]
records = records.reshape((num_points, num_steps, num_particles, num_fields))
if shuffle:
np.random.RandomState(seed).shuffle(records)
# Create train/test splits and write them to file.
train_records = records[:num_train]
test_records = records[num_train:num_train + num_test]
print('Writing the train dataset to file.')
write_to_file(train_records, box_length, folder, 'train', overwrite)
print('Writing the test dataset to file.')
write_to_file(test_records, box_length, folder, 'test', overwrite)
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/molecular_dynamics/generate_dataset.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dm_hamiltonian_dynamics_suite-master | dm_hamiltonian_dynamics_suite/molecular_dynamics/__init__.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load datasets used in the figures."""
import pickle
import shutil
import subprocess
import numpy as np
import requests
import tree
BUCKET_PATH = 'https://storage.googleapis.com/dm_spatial_memory_pipeline/'
FIG2_FILES = ['trajectories.pickle']
FIG3_FILES = ['hd_stats.pickle', 'parameters.pickle', 'visual_inputs.pickle']
LOCAL_PATH = 'spatial_memory_pipeline_data'
FIG2_TRAJECTORIES_PATH = f'./{LOCAL_PATH}/trajectories.pickle'
FIG3_PARAMETERS_PATH = f'./{LOCAL_PATH}/parameters.pickle'
FIG3_VISUAL_INPUTS_PATH = f'./{LOCAL_PATH}/visual_inputs.pickle'
FIG3_HD_STATS_PATH = f'./{LOCAL_PATH}/hd_stats.pickle'
def _download_files(filenames):
subprocess.check_call(['mkdir', '-p', f'{LOCAL_PATH}'])
for fname in filenames:
url = f'{BUCKET_PATH}{fname}'
dest = f'{LOCAL_PATH}/{fname}'
print(f'Downloading: {url}')
with requests.get(url, stream=True) as r, open(dest, 'wb') as w:
r.raise_for_status()
shutil.copyfileobj(r.raw, w)
def download_figure2_files():
_download_files(FIG2_FILES)
def download_figure3_files():
_download_files(FIG3_FILES)
def load_figure_2_trajectories():
with open(FIG2_TRAJECTORIES_PATH, 'rb') as fh:
trajectories = pickle.load(fh)
return trajectories
def load_figure_3_parameters():
with open(FIG3_PARAMETERS_PATH, 'rb') as fh:
parameters = pickle.load(fh)
return parameters
def load_figure_3_hd_stats():
with open(FIG3_HD_STATS_PATH, 'rb') as fh:
hd_stats = pickle.load(fh)
return hd_stats
def load_figure_3_visual_inputs():
with open(FIG3_VISUAL_INPUTS_PATH, 'rb') as fh:
visual_inputs = pickle.load(fh)
return visual_inputs
def flatten_trajectories(traj_ds):
"""Turn a trajectory dataset into a sample dataset, and add a time field."""
def _flatten(x):
"""Flatten the first 2 dims of fields with > 2 dims."""
if isinstance(x, np.ndarray) and x.ndim > 2:
return np.reshape(x, (-1,) + x.shape[2:])
else:
return x
ds = tree.map_structure(_flatten, traj_ds)
return ds
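# Illustrative example (hypothetical field names): flattening merges the
# trajectory and step dimensions of every array with more than two
# dimensions, leaving other entries untouched.
def _example_flatten_trajectories():
  traj_ds = {'pos': np.zeros((10, 50, 2)), 'hd': np.zeros((10, 50, 1))}
  ds = flatten_trajectories(traj_ds)
  assert ds['pos'].shape == (500, 2)
  assert ds['hd'].shape == (500, 1)
  return ds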
def description(x):
if isinstance(x, dict):
return '\n'.join([f'{k}: {description(v)}' for k, v in sorted(x.items())])
if isinstance(x, np.ndarray):
return f'array{x.shape}'
elif isinstance(x, list):
return f'list({", ".join([description(k) for k in x])})'
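# Illustrative example of `description` on a small nested structure
# (hypothetical keys): it returns one line per sorted key, e.g.
# "extras: list(array(2,), array(4,))\npos: array(10, 3)".
def _example_description():
  ds = {'pos': np.zeros((10, 3)), 'extras': [np.ones(2), np.ones(4)]}
  return description(ds)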
| spatial_memory_pipeline-main | src/dataset.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plotting utils."""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def octant_grid(rms, rms_octants):
"""Put main rms and the 8 octant rmss in a single figure.
Args:
rms: H x W array.
rms_octants: H x W x 8 array, assuming the i-th 3D slice corresponds to the
i-th octant (0=octant centered at 0 degrees, 1 octant centered at 45
degrees, etc.)
Returns:
(3H + padding) x (3W + padding) array with the individual rms in correct
order to be plotted.
"""
all_rms = np.dstack((rms_octants, rms))
padded_rms = np.zeros((all_rms.shape[0] + 2, all_rms.shape[1] + 2, 9))
padded_rms[1:-1, 1:-1, :] = all_rms
h, w = padded_rms.shape[:2]
display_order = [[5, 4, 3], [6, 8, 2], [7, 0, 1]]
grid = padded_rms[..., display_order] # H x W x 3 x 3
grid = np.rollaxis(grid, 2, 0) # 3 x H x W x 3
grid = np.rollaxis(grid, 3, 2) # 3 x H x 3 x W
return grid.reshape(3 * h, 3 * w)
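# Illustrative shape check for `octant_grid`: each panel is padded by one
# pixel on every side and the nine panels are tiled into a 3 x 3 grid.
def _example_octant_grid():
  rms = np.ones((4, 5))
  rms_octants = np.zeros((4, 5, 8))
  grid = octant_grid(rms, rms_octants)
  assert grid.shape == (3 * (4 + 2), 3 * (5 + 2))
  return grid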
def setup_spines(ax, lw=1.5):
for sp in ax.spines.values():
sp.set_linewidth(lw)
sp.set_color("#333333")
def imshow(ax, image):
ax.imshow(image, origin="lower", interpolation="none")
ax.set_axis_off()
def prop_hist(ax, data, bins, polar=False, **kwargs):
hist = np.histogram(data, bins=bins)[0]
hist = hist / len(data)
# For polar plots we want the *area* to be determined
# by the proportion, not the radius
if polar:
hist = np.sqrt(hist)
w = (bins.max() - bins.min()) / (len(bins) - 1)
ax.bar(bins[:-1] + w/2, hist, width=w, **kwargs)
def octant_ratemap_to_rgb(rms, per_octant, min_perc=0, max_perc=100,
cmap="jet"):
"""Produce an rgb image from per-octant ratemaps.
Normally we would let plt.imshow normalize values automatically when
it applies the colormap, but since we can have nans and we may want to
make a grid of octants (where the values between the octants would be
meaninglessly color-mapped) we have to convert the ratemap to rgb color
before we do imshow, so we normalize it manually.
Args:
rms: a 2-tuple with (H x W, H x W x 8) global and per-octant ratemaps.
per_octant: (bool) If True, we'll return a per-octant rgb grid, otherwise
just the normalized RGB global ratemap.
min_perc: (float, default 0) Percentile of the range of ratemap values
to use as the bottom of the color scale.
max_perc: (float, default 100) Percentile of the range of ratemap values
to use as the top of the color scale.
cmap: (str) Name of the colormap to use.
Returns:
A single RGB image with the ratemap or the 9 per-octant + global grid.
"""
data = np.ravel(rms[0])
if per_octant:
data = np.concatenate((data, np.ravel(rms[1])))
lims = np.nanpercentile(data, [min_perc, max_perc])
center_rms = (rms[0] - lims[0]) / (lims[1] - lims[0] + 1e-16)
the_rims = None
if per_octant:
side_rms = (rms[1] - lims[0]) / (lims[1] - lims[0] + 1e-16)
center_rms = octant_grid(center_rms + 1, side_rms + 1)
the_rims = center_rms == 0
center_rms -= 1
center_rms[the_rims] = 0
rms = center_rms
nans = np.isnan(rms)
rms[nans] = lims[0]
rms = matplotlib.cm.get_cmap(cmap)(rms)[..., :3] # to rgb, discard alpha
rms[nans] = 1.0
if the_rims is not None:
rms[the_rims] = 1.0
return rms
class SuperFigure(object):
"""Plotting figures with lettered subpanels."""
def __init__(self, width_inches, height_inches, dpi=200, **kwargs):
self._dpi = dpi
self._wi = width_inches
self._hi = height_inches
kwargs["figsize"] = (width_inches * dpi / 72, height_inches * dpi / 72)
kwargs["dpi"] = 72
self._fig, self._allax = plt.subplots(1, 1, **kwargs)
self._allax.set_axis_off()
self.label_letter = "A"
self.label_suffix = ")"
def next_label(self):
curr = self.label_letter
self.label_letter = chr(ord(self.label_letter) + 1)
return curr + self.label_suffix
def add_subplots_to_figure(self, n_rows, n_cols, pos_x, pos_y, width, height,
hspace=0.1, vspace=0.1, text=None, text_offset=0.0,
**subplot_kw):
"""Add a grid of subplots.
Args:
n_rows: Number of rows of subplots to add.
n_cols: Number of columns of subplots to add.
pos_x: x position in the figure of the top-left subplot.
pos_y: y position in the figure of the top-left subplot.
width: width of each subplot.
height: height of each subplot.
hspace: horizontal space between subplot columns.
vspace: vertical space between subplot rows.
text: text next to the top-left subplot for referring to the panel.
text_offset: x offset of the text for fine-tuning the look.
**subplot_kw: dictionary of extra parameters in subplot creation.
Returns:
An array of matplotlib axes.
"""
try:
text_offset = np.asarray(text_offset).reshape([2])
except ValueError:
text_offset = np.asarray([text_offset, 0]).reshape([2])
if text is not None:
self.add_text(pos_x + text_offset[0], pos_y + text_offset[1], text,
fontsize=9)
size_inches = self._fig.get_size_inches() / (self._dpi / 72.0)
# Inches to relative
pos_x /= size_inches[0]
pos_y /= size_inches[1]
width /= size_inches[0]
height /= size_inches[1]
hspace /= size_inches[0]
vspace /= size_inches[1]
# Use distance in inches from the top, not from the bottom
pos_y = 1 - (pos_y + height * n_rows + vspace * (n_rows - 1))
axs = np.empty((n_rows, n_cols), dtype=object)
for row in range(n_rows):
for col in range(n_cols):
axs[row, col] = self._fig.add_axes((pos_x + col * (hspace + width),
pos_y - row * (vspace + height),
width, height), **subplot_kw)
return axs
def add_text(self, pos_x, pos_y, text, fontsize=9):
if text == "auto":
text = self.next_label()
size_inches = self._fig.get_size_inches() / (self._dpi / 72.0)
pos_x /= size_inches[0]
pos_y /= size_inches[1]
pos_y = 1 - pos_y
self._fig.text(
pos_x,
pos_y,
text,
horizontalalignment="right",
verticalalignment="bottom",
fontsize=fontsize * self._dpi / 72)
def show(self):
return self._fig
| spatial_memory_pipeline-main | src/plot_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classify units according to measureas used for biological cells."""
import numpy as np
from spatial_memory_pipeline import scores
def _make_intervals(x):
"""Given [0, 1, 2], return [-0.5, 0.5, 1.5, 2.5]."""
d = np.diff(x)
return np.concatenate([
[x[0] - d[0] / 2.0], # first interval's left bound
x[:-1] + d / 2.0, # interval separations
[x[-1] + d[-1] / 2.0] # last interval's right bound
])
def _rotate_distances(distances, hd):
"""Rotate allocentric distances to egocentric according to head direction.
Args:
distances: Array of shape [B, n_angles].`n_angles` the number of angles in
the rat's distance sensor. The values are the distances to the closest
wall for each world (allocentric) angle. The angles they correspond to are
supposed to be equispaced at 2 * pi / `n_angles` rad of each other.
hd: Array of shape [B]. For each distance, the corresponding allocentric
head direction, in radians.
Returns:
Array of same shape as `distances`, each row shifted (rotated) to make
the distances egocentric.
Raises:
ValueError: The dimensions of `distances` and/or `hd` are not as expected.
"""
hd = hd.squeeze() # it comes with trailing dimension typically
if hd.ndim != 1:
raise ValueError("Head direction should be a vector.")
if distances.ndim != 2:
raise ValueError("Distances should be a 2-D array.")
if hd.shape[0] != distances.shape[0]:
raise ValueError("Head direction should have same batch size as distances.")
n_angles = distances.shape[1]
  shifts = np.mod(-np.round(n_angles * hd / (2 * np.pi)).astype(int),
                  n_angles)
r_idx, c_idx = np.ogrid[:distances.shape[0], :distances.shape[1]]
c_idx = c_idx - shifts[:, None]
return distances[r_idx, c_idx]
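# Illustrative example of the rotation above: a quarter-turn head direction
# shifts the distance columns by one bin (with four sensor angles). Note that
# `hd` must squeeze to a vector, hence the two samples.
def _example_rotate_distances():
  distances = np.array([[1., 2., 3., 4.],
                        [1., 2., 3., 4.]])
  hd = np.array([[np.pi / 2.], [0.]])
  ego = _rotate_distances(distances, hd)
  assert ego.tolist() == [[2., 3., 4., 1.], [1., 2., 3., 4.]]
  return ego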
def bvc_rms_egocentric(activations, world_distances, hd, distances, **kwargs):
ego_distances = _rotate_distances(world_distances, hd)
return bvc_rms(activations, ego_distances, distances, **kwargs)
def bvc_rms_allocentric(activations, world_distances, unused_hd, distances,
**kwargs):
return bvc_rms(activations, world_distances, distances, **kwargs)
def bvc_rms(activations, world_distances, distances, for_plotting=False):
"""Compute boundary vector cell (BVC) ratemaps.
Computes the mean activation binned by distance and (allocentric)
angle to the nearest wall. The angular binning is the same as in the
`world_distances` matrix. The distance binning is the one in distances.
Args:
activations: Array of shape [B, n_units]. `B` is the batch size, `n_units`
the size of an the RNN layer we want to compute score for.
world_distances: Array of shape [B, n_angles]. `B` is the same batch size as
activations, `n_angles` the number of angles in the rat's distance sensor.
The values are the distances to the closest wall for each world
(allocentric) angle.
distances: Distance binning edges.
for_plotting: Boolean that indicates whether to have a final pi angle as it
makes the radar plots look better.
Returns:
A 3-tuple of:
distances: vector of bin centers for BVC distances.
angles: vector of bin centers for BVC angles.
correlations: a [n_distances, n_angles, n_units] matrix
with the mean activations for each distance and angle bin.
"""
n_angles = world_distances.shape[1]
if for_plotting:
angles = np.linspace(-np.pi, np.pi, n_angles + 1)
else:
angles = np.linspace(-np.pi, np.pi, n_angles + 1)[:-1]
# Create bins of distances instead of real-values
distance_edges = _make_intervals(distances)
distance_bins = np.digitize(world_distances, distance_edges)
bin_counts = np.apply_along_axis(
np.bincount,
0,
distance_bins,
weights=None,
minlength=len(distance_edges) + 1)
n_units = activations.shape[1]
rms = []
for unit_idx in range(n_units):
rms.append(
np.apply_along_axis(
np.bincount,
0,
distance_bins,
weights=activations[:, unit_idx],
minlength=len(distance_edges) + 1) / (bin_counts + 1e-12))
return distances, angles, np.dstack(rms)[1:-1, ...]
def get_resvec_shuffled_bins_threshold(ang_rmaps,
percentile=99,
n_shuffles=1000):
"""Get the percentile of resvec length after shuffling bins.
Args:
ang_rmaps: angular ratemaps.
percentile: percentile of resvec lengths to return.
n_shuffles: number of times to shuffle each unit.
Returns:
The resultant vector length threshold.
"""
assert ang_rmaps.ndim == 2
nbins, n_units = ang_rmaps.shape
rv_scorer = scores.ResultantVectorHeadDirectionScorer(nbins)
shuffle_rv_lens = np.asarray([
rv_scorer.resultant_vector_score(
np.random.permutation(ang_rmaps[:, i % n_units]))
for i in range(n_units * n_shuffles)
]).flatten()
return np.percentile(shuffle_rv_lens, percentile)
def get_hd_shuffled_bins_threshold(activations,
hds,
n_bins=20,
percentile=99,
n_shuffles=1000):
"""Calculate the resvec length threshold for HD cells.
Args:
activations: array of cells activations.
hds: head direction of the agent.
n_bins: number of head-direction bins to use.
percentile: percentile of shuffled resvec lengths to return.
n_shuffles: number of bin shuffles to use.
Returns:
The resultant vector length threshold.
"""
rv_scorer = scores.ResultantVectorHeadDirectionScorer(n_bins)
n_units = activations.shape[-1]
ang_rmaps = np.asarray([
rv_scorer.calculate_hd_ratemap(hds[:, 0], activations[:, i])
for i in range(n_units)
]).T
return get_resvec_shuffled_bins_threshold(
ang_rmaps, percentile=percentile, n_shuffles=n_shuffles)
def get_hd_cell_parameters(acts, hds, threshold, n_bins=20):
"""Calculate which cells are head-direction and their preferred angle.
Args:
acts: array of cells activations.
hds: head direction of the agent.
    threshold: resvec length at which a unit is considered a head-direction
      cell.
n_bins: number of head-direction bins to use.
Returns:
Array of bools indicating which cells are HD cells, Array of resvec lengths,
and Array of preferred head-direction angles.
"""
rv_scorer = scores.ResultantVectorHeadDirectionScorer(n_bins)
n_units = acts.shape[-1]
ang_rmaps = [rv_scorer.calculate_hd_ratemap(hds[:, 0], acts[:, i])
for i in range(n_units)]
rv_lens = np.asarray([rv_scorer.resultant_vector_score(ang_rmaps[i])
for i in range(n_units)]).flatten()
rv_angles = np.asarray([rv_scorer.resultant_vector_angle(ang_rmaps[i])
for i in range(n_units)]).flatten()
return rv_lens > threshold, rv_lens, rv_angles
def get_bvc_cell_parameters(rms_dists,
rms_angs,
rms_vals,
threshold,
unused_distance_step=2.5):
"""Calculate which cells are bvc and their preferred distance and angle.
Args:
rms_dists: Distances in the rms data.
rms_angs: Angles in the rms data.
rms_vals: Array of activity of shape [n_distances, n_angles, n_units].
threshold: resultant vector length threshold to consider a cell bvc.
unused_distance_step: unused parameter.
Returns:
Array of bools indicating which cells are BVC cells, Array of preferred
angles, Array of preferred distances, and array of resvec lengths.
"""
_, n_angles, n_units = rms_vals.shape
l = rms_vals.sum(axis=0)
y = np.sum(np.sin(rms_angs)[:, np.newaxis] * l, axis=0)
x = np.sum(np.cos(rms_angs)[:, np.newaxis] * l, axis=0)
# To be consistent with the ResVec scorer used for the HD cells, use
# arctan2(x, y) rather than arctan2(y, x).
# The conventional angle is then pi/2 minus the angle returned by arctan2.
bvc_angles = np.arctan2(x, y)
bvc_orig_angles = np.arctan2(y, x)
bvcbins = (
np.digitize(bvc_orig_angles, np.concatenate([rms_angs, [np.pi]])) - 1)
bvc_dists = rms_dists[np.argmax(
rms_vals[:, bvcbins, range(len(bvcbins))], axis=0)]
rv_scorer = scores.ResultantVectorHeadDirectionScorer(n_angles)
rvls = [rv_scorer.resultant_vector_score(rms_vals[..., i].sum(axis=0))[0]
for i in range(n_units)]
rvls = np.asarray(rvls)
is_bvc = (rvls > threshold)
return is_bvc, bvc_angles, bvc_dists, rvls
def classify_units_into_representations(activities, hds, pos, wall_distances,
n_shuffles=1000,
distance_bin_size=0.025,
percentile=99):
"""Identify which cells are HD, egoBVC or alloBVC (non-exclusive).
Args:
activities: array of cell activations.
hds: array of head directions of the agent.
pos: array of position of the agent in the enclosure.
wall_distances: array of distances to walls at each time-step.
n_shuffles: number of shuffles used in calculating the thresholds.
distance_bin_size: size of distance bins.
percentile: percentile of score in the shuffled data to use as threshold.
Returns:
Tuple of
(whether each cell is a head direction cell,
head-direction resvec length threshold,
hd score of each cell,
head-direction resultant-vector length of each cell,
preferred head direction of each cell,
whether each cell is an egocentric boundary cell,
ego-bvc resvec threshold,
ego-bvc score for each cell,
preferred egocentric boundary distance to wall of each cell,
preferred egocentric boundary angle to wall of each cell,
whether each cell is an allocentric boundary cell,
allo-bvc resvec threshold,
allo-bvc score for each cell,
preferred allocentric boundary distance to wall of each cell,
preferred allocentric boundary angle to wall of each cell)
"""
# Calculate preferred wall distances up to half of the enclosure size
max_d = (pos[:, 0].max() - pos[:, 0].min()) / 2
distances = np.arange(distance_bin_size, max_d, distance_bin_size)
# Identify head-direction cells
hd_threshold = get_hd_shuffled_bins_threshold(
activities, hds, percentile=percentile, n_shuffles=n_shuffles)
is_hd, hd_rv_lens, hd_rv_angles = get_hd_cell_parameters(
activities, hds, hd_threshold, n_bins=20)
hd_score = hd_rv_lens
# Identify egocentric-boundary cells
erms_dists, erms_angs, erms_vals = bvc_rms_egocentric(
activities, wall_distances, hds, distances)
ego_threshold = get_resvec_shuffled_bins_threshold(
erms_vals.sum(axis=0), percentile=percentile, n_shuffles=n_shuffles)
is_ego, ego_angle, ego_dist, ego_score = get_bvc_cell_parameters(
erms_dists, erms_angs, erms_vals, ego_threshold)
# Identify allocentric boundary cells
arms_dists, arms_angs, arms_vals = bvc_rms_allocentric(
activities, wall_distances, hds, distances)
allo_threshold = get_resvec_shuffled_bins_threshold(
arms_vals.sum(axis=0), percentile=percentile, n_shuffles=n_shuffles)
is_allo, allo_angle, allo_dist, allo_score = get_bvc_cell_parameters(
arms_dists, arms_angs, arms_vals, allo_threshold)
return (is_hd, hd_threshold, hd_score, hd_rv_lens, hd_rv_angles,
is_ego, ego_threshold, ego_score, ego_dist, ego_angle,
is_allo, allo_threshold, allo_score, allo_dist, allo_angle)
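# Illustrative usage sketch, for exposition only; the array shapes below are
# assumptions: `activities` is [n_steps, n_units], `hds` is [n_steps, 1],
# `pos` is [n_steps, 2] and `wall_distances` is [n_steps, n_angles].
def _example_classification_usage(activities, hds, pos, wall_distances):
  results = classify_units_into_representations(
      activities, hds, pos, wall_distances, n_shuffles=100)
  # Indices follow the tuple documented above: 0 is_hd, 5 is_ego, 10 is_allo.
  is_hd, is_ego, is_allo = results[0], results[5], results[10]
  return np.stack([is_hd, is_ego, is_allo], axis=0)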
| spatial_memory_pipeline-main | src/classify_units.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cell-type score calculations."""
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
import scipy.spatial
import scipy.stats
def positional_correlations_manipulation(pos, hds, acts, pos_b, hds_b, acts_b):
"""Positional correlation between two datasets."""
pos_corr = list()
for unit in range(acts.shape[1]):
ratemap_a = calculate_ratemap(
pos[:, 0], pos[:, 1], acts[:, unit],
hd=hds[:])[0][0].reshape([1, -1])
ratemap_b = calculate_ratemap(
pos_b[:, 0], pos_b[:, 1], acts_b[:, unit],
hd=hds_b[:])[0][0].reshape([1, -1])
not_nan = np.logical_and(np.isfinite(ratemap_a), np.isfinite(ratemap_b))
cc = np.corrcoef(ratemap_a[not_nan], ratemap_b[not_nan])[0, 1]
pos_corr.append(cc)
return np.asarray(pos_corr)
def positional_correlations(pos, hds, acts):
"""Spatial stability as the correlation between two halves of data."""
half = pos.shape[0] // 2
return positional_correlations_manipulation(pos[:half, ...],
hds[:half],
acts[:half, ...],
pos[half:, ...],
hds[half:],
acts[half:, ...])
def calculate_ratemap(xs,
ys,
activations,
bin_size=0.14, # to divide 2.2 meters into 16 bins
coords_range=None,
statistic="mean",
hd=None):
"""Calculate 2D arrays of activations binned by spatial location.
Args:
xs: vector of length B with the x position of the agent.
ys: vector of length B with the y position of the agent.
activations: vector of length B with the activation of one cell at each
position.
bin_size: In distance units, for binning across both dimensions.
coords_range: A pair of tuples ((xmin, xmax), (ymin, ymax)). If not given,
the `xs` and `ys` ranges are used.
statistic: What to compute over activations in each bin.
hd: Optional. The head directions corresponding to each position.
If given, not one but 9 ratemaps will be computed: the general
one and one for each head-direction octant.
Returns:
A 3-tuple:
The first element is the 2d ratemap if `hd` is None. If `hd`
is given, it will be a 2-tuple, where the first one is the
normal ratemap and second is a H x W x 8 per-octant ratemap, octants
being [-pi/8, pi/8], [pi/8, 3pi/8], [3pi/8, 5pi/8], [5pi/8, 7pi/8],
[7pi/8, -7pi/8] (wrapping through pi), [-7pi/8, -5pi/8], [-5pi/8, -3pi/8], and
[-3pi/8, -pi/8].
The second and third elements are the x and y edges used for binning.
"""
def _get_edges(min_v, max_v):
v_range = max_v - min_v
n_bins = np.ceil(v_range / bin_size).astype(int)
pad = (n_bins * bin_size - v_range) / 2
return np.linspace(min_v - pad / 2, max_v + pad / 2, n_bins + 1)
if coords_range is None:
coords_range = ((xs.min(), xs.max()), (ys.min(), ys.max()))
x_edges = _get_edges(coords_range[0][0], coords_range[0][1])
y_edges = _get_edges(coords_range[1][0], coords_range[1][1])
rms = scipy.stats.binned_statistic_2d(
xs, ys, activations, bins=(x_edges, y_edges), statistic=statistic)[0]
if hd is not None:
octants = np.mod(np.round(np.squeeze(hd) / (np.pi / 4)), 8)
octant_edges = np.linspace(-0.5, 7.5, 9)
octant_rms = scipy.stats.binned_statistic_dd(
(xs, ys, octants), activations,
bins=(x_edges, y_edges, octant_edges),
statistic=statistic)[0]
rms = (rms, octant_rms)
return rms, x_edges, y_edges
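# Illustrative usage sketch, for exposition only, with synthetic data: bins a
# single unit's activity into a 2D spatial ratemap with 0.14 m bins.
def _example_ratemap_usage():
  xs = np.random.uniform(0.0, 2.2, size=1000)
  ys = np.random.uniform(0.0, 2.2, size=1000)
  activations = np.random.rand(1000)
  rms, x_edges, y_edges = calculate_ratemap(xs, ys, activations, bin_size=0.14)
  # rms is an [n_x_bins, n_y_bins] array; each edge vector has one extra entry.
  return rms.shape, x_edges.shape, y_edges.shape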
class ResultantVectorHeadDirectionScorer(object):
"""Class for scoring correlation between activation and head-direction."""
def __init__(self, nbins):
"""Constructor.
Args:
nbins: Number of bins for the angle.
"""
self._bins = np.linspace(-np.pi, np.pi, nbins + 1)
self._bin_centers = 0.5 * self._bins[:-1] + 0.5 * self._bins[1:]
self._bin_vectors = np.asarray(
[np.cos(self._bin_centers),
np.sin(self._bin_centers)]).T # n_bins x 2
def calculate_hd_ratemap(self, hds, activations, statistic="mean"):
hds = np.arctan2(np.sin(hds), np.cos(hds))
total_bin_act = scipy.stats.binned_statistic(
hds,
activations,
bins=self._bins,
statistic=statistic,
range=(-np.pi, np.pi))[0]
return total_bin_act
def resultant_vector(self, ratemaps):
if ratemaps.ndim == 1:
ratemaps = ratemaps[np.newaxis, :]
resv = (np.matmul(ratemaps, self._bin_vectors) /
(np.sum(ratemaps, axis=1)[:, np.newaxis] + 1e-5))
return resv
def resultant_vector_score(self, ratemaps):
resv = self.resultant_vector(ratemaps)
return np.hypot(resv[:, 0], resv[:, 1])
def resultant_vector_angle(self, ratemaps):
resv = self.resultant_vector(ratemaps)
return np.arctan2(resv[:, 0], resv[:, 1])
def plot_polarplot(self,
ratemaps,
ax=None,
title=None,
relative=False,
positive_lobe_color="b",
negative_lobe_color="r",
**kwargs):
"""ratemaps: list of ratemaps."""
if isinstance(ratemaps, list):
ratemaps = np.asarray(ratemaps, dtype=np.float32)
if ax is None:
ax = plt.gca()
# Plot the ratemap
for i in range(ratemaps.shape[0]):
ang = np.append(self._bin_centers, [self._bin_centers[0]], axis=0)
pos_rm = ratemaps[i] * (ratemaps[i] > 0)
pos_lens = np.append(pos_rm, [pos_rm[0]], axis=0)
neg_rm = -ratemaps[i] * (ratemaps[i] < 0)
neg_lens = np.append(neg_rm, [neg_rm[0]], axis=0)
if relative:
pos_lens /= pos_lens.sum()
neg_lens /= neg_lens.sum()
# Our ratemap convention is first coordinate positive up, second
# coordinate positive right. To have the same orientation in polar
# plots we do pi/2 - angle
ax.plot(np.pi / 2 - ang, pos_lens, color=positive_lobe_color, **kwargs)
ax.plot(np.pi / 2 - ang, neg_lens, color=negative_lobe_color, **kwargs)
if title is not None:
ax.set_title(title)
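# Illustrative usage sketch, for exposition only, with synthetic data: scores
# how strongly an activity trace is tuned to head direction.
def _example_resultant_vector_usage():
  hds = np.random.uniform(-np.pi, np.pi, size=5000)
  activations = np.cos(hds - 0.5) + 1.0  # Non-negative, tuned near 0.5 rad.
  scorer = ResultantVectorHeadDirectionScorer(nbins=20)
  ratemap = scorer.calculate_hd_ratemap(hds, activations)
  # A strongly tuned unit has a resultant vector length close to 1.
  return (scorer.resultant_vector_score(ratemap),
          scorer.resultant_vector_angle(ratemap))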
| spatial_memory_pipeline-main | src/scores.py |
# Copyright 2018 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Setup for pip package."""
import unittest
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = [
'absl-py', 'kfac>=0.2.3', 'numpy<1.19.0', 'pandas', 'pyscf', 'pyblock',
'dm-sonnet<2.0', 'tables', 'tensorflow>=1.14,<2.0',
'tensorflow_probability==0.8'
]
EXTRA_PACKAGES = {
'tensorflow-gpu': ['tensorflow-gpu>=1.14,<2.0'],
}
def ferminet_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('ferminet/tests', pattern='*_test.py')
return test_suite
setup(
name='ferminet',
version='0.1',
description='A library to train networks to represent ground state wavefunctions of fermionic systems',
url='https://github.com/deepmind/ferminet',
author='DeepMind',
author_email='[email protected]',
# Contained modules and scripts.
scripts=['bin/ferminet'],
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
extras_require=EXTRA_PACKAGES,
platforms=['any'],
license='Apache 2.0',
test_suite='setup.ferminet_test_suite',
)
| ferminet-master | setup.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| ferminet-master | ferminet/__init__.py |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extension to KFAC that adds correction term for non-normalized densities."""
import kfac
ip_p = kfac.utils.ip_p
sum_p = kfac.utils.sum_p
sprod_p = kfac.utils.sprod_p
class MeanCorrectedKfacOpt(kfac.PeriodicInvCovUpdateKfacOpt):
"""Like KFAC, but with all moments mean-centered."""
def __init__(self, *args, **kwargs):
"""batch_size is mandatory, estimation_mode='exact' or 'exact_GGN'."""
kwargs["compute_params_stats"] = True
if not kwargs["estimation_mode"].startswith("exact"): # pytype: disable=attribute-error
raise ValueError("This is probably wrong.")
super(MeanCorrectedKfacOpt, self).__init__(*args, **kwargs)
def _multiply_preconditioner(self, vecs_and_vars):
# Create a w list with the same order as vecs_and_vars
w_map = {var: ps for (ps, var) in zip(self.params_stats,
self.registered_variables)}
w = tuple((w_map[var], var) for (_, var) in vecs_and_vars)
r = super(MeanCorrectedKfacOpt, self)._multiply_preconditioner(
vecs_and_vars)
u = super(MeanCorrectedKfacOpt, self)._multiply_preconditioner(w)
scalar = ip_p(w, r) / (1. + ip_p(w, w))
return sum_p(r, sprod_p(scalar, u))
| ferminet-master | ferminet/mean_corrected_kfac_opt.py |
# Lint as: python3
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network building for quantum Monte Carlo calculations."""
# Using networks with MCMC and optimisation
#
# We create different instances of the same model, all using the same parameters
# via Sonnet's variable sharing: one to run pre-training, one to sample electron
# configurations using MCMC and one for use with the optimiser. This requires
# care when using K-FAC: normally one would only have multiple registrations of
# network instances if using multi-tower mode, which we aren't.
# K-FAC detects variable re-use from the variable scope, which gets set to true
# by Sonnet when we create additional instances of the model, in layer
# registration, and hence results in errors because we're not using multi-tower.
# Thus we override the defaults by explicitly setting reuse=False in all layer
# registration calls.
# pylint: disable=g-long-lambda
import functools
import typing
from ferminet import hamiltonian
import sonnet as snt
import tensorflow.compat.v1 as tf
def extend(x, y):
"""Add extra dimensions so x can be broadcast with y."""
if isinstance(x, float):
return x
rank = y.shape.rank
rank_ = x.shape.rank
assert rank_ <= rank
for _ in range(rank - rank_):
x = tf.expand_dims(x, -1)
return x
def flatten(seq):
"""Recursively flattens a sequence."""
lst = []
for el in seq:
if isinstance(el, (list, tuple)):
lst.extend(flatten(el))
else:
lst.append(el)
return lst
@tf.custom_gradient
def sign_det(x):
"""Sign of determinant op whose gradient doesn't crash when the sign is 0."""
with tf.name_scope('sign_det'):
sign = tf.linalg.slogdet(x)[0]
def grad(_):
return tf.zeros_like(x)
return sign, grad
@tf.custom_gradient
def determinant(x):
"""Determinant op with well-defined gradient for singular matrices."""
with tf.name_scope('determinant'):
with tf.device('/cpu:0'):
s, u, v = tf.linalg.svd(x)
sign = sign_det(x)
def grad(dx):
ss = tf.tile(s[..., None], len(s.shape) * [1] + [s.shape[-1]])
ss = tf.linalg.set_diag(ss, tf.ones_like(s))
z = tf.reduce_prod(ss, axis=-2)
adj = tf.matmul(u, tf.matmul(tf.linalg.diag(z), v, transpose_b=True))
signdx = tf.expand_dims(tf.expand_dims(sign * dx, -1), -1)
return signdx * adj
return sign * tf.reduce_prod(s, axis=-1), grad
@tf.custom_gradient
def log_determinant(x):
"""Log-determinant op with well-defined gradient for singular matrices."""
with tf.name_scope('determinant'):
with tf.device('/cpu:0'):
s, u, v = tf.linalg.svd(x)
sign = sign_det(x)
def grad(dsign, dlog):
del dsign
# d ln|det(X)|/dX = transpose(X^{-1}). This uses pseudo-inverse via SVD
# and so is not numerically stable...
adj = tf.matmul(u,
tf.matmul(tf.linalg.diag(1.0 / s), v, transpose_b=True))
return tf.expand_dims(tf.expand_dims(dlog, -1), -1) * adj
return (sign, tf.reduce_sum(tf.log(s), axis=-1)), grad
def cofactor(u, s, v, shift=0.0):
r"""Calculates the cofactor matrix from singular value decomposition.
Given M = U S V*, where * indicates the conjugate transpose, the cofactor
of M, M^C = det(A) A^{-T}, is given by M^C = det(U) det(V) U \Gamma V^T,
where \Gamma_{ij} = \delta_{ij} \prod_{k!=i} s_k, s_k is the k-th singular
value of M. The latter equation for M^C is well behaved even for singular
matrices.
Note:
det(U), det(V) \in {-1, +1} are calculated as part of the determinant
evaluation and so not repeated here.
Args:
u: unitary matrix, U, from SVD.
s: singular values, S, from SVD.
v: unitary matrix, V, from SVD.
shift (optional): constant to subtract in log domain for stability.
Returns:
cofactor matrix up to the sign given by det(U) det(V), i.e.
M^C / (det(U) det(V)) = U \Gamma V^T, divided by exp(shift).
"""
return tf.matmul(
u, tf.matmul(tf.linalg.diag(gamma(s, shift)), v, transpose_b=True))
def gamma(s, shift=0.0):
r"""Calculates diagonal of \Gamma_{ij} = \delta_{ij} \Prod_{k!=i} s_k.
Args:
s: singular values of matrix.
shift (optional): constant to subtract in log domain for stability.
Returns:
Diagonal of \Gamma over the outer dimension of s, divided by exp(shift).
"""
ls = tf.log(s)
lower = tf.cumsum(ls, axis=-1, exclusive=True)
upper = tf.cumsum(ls, axis=-1, exclusive=True, reverse=True)
return tf.exp(lower + upper - shift)
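# Illustrative sketch, for exposition only, of the identity `gamma` computes
# (with shift=0): gamma(s)_i = prod_{k != i} s_k. Uses a local numpy import so
# the module's dependencies are unchanged.
def _example_gamma_naive():
  import numpy as np  # pylint: disable=g-import-not-at-top
  s = np.array([2.0, 3.0, 4.0])
  # Expected: [3*4, 2*4, 2*3] = [12., 8., 6.]
  return np.array([np.prod(np.delete(s, i)) for i in range(len(s))])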
def rho(s, shift=0.0):
r"""Calculates \rho_ij = \prod_{k!=i,j} s_k, \rho_ii = 0.
Args:
s: singular values of matrix.
shift (optional): constant to subtract in log domain for stability.
Returns:
\rho, the first derivative of \Gamma (see `gamma`), divided by exp(shift).
"""
# Product of singular elements before and after two indices, i, j,
# i.e. v_{ij} = \Prod_{k<i,l>j} s_i s_j -- outer product of forward and
# reverse cumulative products. Valid only for j>i.
# Computed in log domain for numerical stability.
ls = tf.log(s)
lower_cumsum = tf.cumsum(ls, axis=-1, exclusive=True)
upper_cumsum = tf.cumsum(ls, axis=-1, reverse=True, exclusive=True)
v = tf.expand_dims(lower_cumsum, -1) + tf.expand_dims(upper_cumsum, -2)
# Product of singular elements between two indices, \Prod_{i<k<l} s_k.
# Create matrix with strict upper diagonal set to s_k and ones elsewhere and
# take cumulative product.
# Also computed in log domain for numerical stability
s_mat = tf.linalg.transpose(
tf.tile(ls[..., None], [1] * len(s.shape) + [s.shape[-1]]))
triu_s_mat = tf.linalg.set_diag(
tf.matrix_band_part(s_mat, 0, -1), tf.zeros_like(s))
z = tf.cumsum(triu_s_mat, exclusive=True, axis=-1)
# Discard lower triangle.
r = tf.exp(z + v - extend(shift, z))
r = tf.matrix_band_part(r, 0, -1)
r = tf.linalg.set_diag(r, tf.zeros_like(s))
return r + tf.linalg.transpose(r)
def grad_cofactor(u, v, r, grad_dy):
r"""Calculate the first derivative of the cofactor of a matrix M.
Given M = U S V*, the cofactor of M, M^C, and \Gamma as defined in `cofactor`,
d(M^C) = U (Tr(K \Gamma) S^{-1} - S^{-1} K \Gamma) V^T, where K = V^T dY^T U,
and dY is the backwards sensitivities. Whilst S^{-1} can be singular, such
terms explicitly cancel, giving for the term in parantheses:
( ... ) = \sum_{k!=i} K_{kk} r_{ik} for i = j
= - K_{ik} r_{ik} otherwise.
where r is calculated by `rho`.
Note:
As in `cofactor`, the factor det(U) det(V) is not included.
Args:
u: unitary matrix U from SVD.
v: unitary matrix V from SVD.
r: matrix r_{ij} = \prod_{k!=i,j} s_k, where s_k is the k-th singular value.
See `rho`.
grad_dy: backwards sensitivites.
Returns:
first derivative of the cofactor matrix, up to the sign det(U) det(V).
"""
with tf.name_scope('grad_cofactor'):
k = tf.matmul(v, tf.matmul(grad_dy, u, transpose_a=True), transpose_a=True)
# r_ii = 0 for convenience. Calculate off-diagonal terms.
kr = -tf.multiply(k, r)
# NB: r is symmetric and r_ii = 0.
kr_diag = tf.matmul(r, tf.expand_dims(tf.linalg.diag_part(k), -1))
kr = tf.linalg.set_diag(kr, tf.squeeze(kr_diag))
return tf.matmul(u, tf.matmul(kr, v, transpose_b=True))
@tf.custom_gradient
def logdet_matmul(x1, x2, w):
"""Numerically stable implementation of log(abs(sum_i w_i |x1_i| * |x2_i|)).
Args:
x1: Tensor of shape [batch size, dets in, n, n].
x2: Tensor of shape [batch size, dets in, m, m].
w: Weight matrix of shape [dets in, dets out].
Returns:
Op that computes matmul(det(x1) * det(x2), w). For numerical stability,
returns this op in the form of a tuple, the first of which is the log
of the absolute value of the output, and the second of which is the sign of
the output.
Raises:
ValueError if x1 and x2 do not have the same shape except the last two
dimensions, or if the number of columns in det(x1) does not match the number
of rows in w.
"""
if x1.shape[:-2] != x2.shape[:-2]:
raise ValueError('x1 and x2 must have the same number of determinants.')
if w.shape[0] != x1.shape[-3]:
raise ValueError('Number of determinants should equal '
'number of inputs to weights.')
with tf.name_scope('logdet_matmul'):
with tf.device('/cpu:0'):
if x1.shape[-1] > 1:
s1, u1, v1 = tf.linalg.svd(x1)
if x2.shape[-1] > 1:
s2, u2, v2 = tf.linalg.svd(x2)
# Computing the sign of the determinant u and v instead of x means this
# works even with singular matrices.
if x1.shape[-1] > 1:
sign1 = sign_det(u1) * sign_det(v1)
logdet1 = tf.reduce_sum(tf.log(s1), axis=-1)
else:
sign1 = tf.sign(x1[..., 0, 0])
logdet1 = tf.log(tf.abs(x1[..., 0, 0]))
if x2.shape[-1] > 1:
sign2 = sign_det(u2) * sign_det(v2)
logdet2 = tf.reduce_sum(tf.log(s2), axis=-1)
else:
sign2 = tf.sign(x2[..., 0, 0])
logdet2 = tf.log(tf.abs(x2[..., 0, 0]))
sign = sign1 * sign2
logdet = logdet1 + logdet2
logdet_max1 = tf.reduce_max(logdet1, axis=-1, keepdims=True)
logdet_max2 = tf.reduce_max(logdet2, axis=-1, keepdims=True)
det = tf.exp(logdet - logdet_max1 - logdet_max2) * sign
output = tf.matmul(det, w)
sign_out = tf.sign(output)
log_out = tf.log(tf.abs(output)) + logdet_max1 + logdet_max2
@tf.custom_gradient
def logdet_matmul_grad(x1, x2, w, grad_log):
"""Numerically stable gradient of log(abs(sum_i w_i |x1_i| * |x2_i|)).
Args:
x1: Tensor of shape [batch size, dets in, n, n].
x2: Tensor of shape [batch size, dets in, m, m].
w: Weight matrix of shape [dets in, dets out].
grad_log: Tensor of shape [batch_size, dets_out] by which to
left-multiply the Jacobian (aka the reverse sensitivity).
Returns:
Ops that compute the gradient of log(abs(matmul(det(x1) * det(x2), w))).
The ops give the gradients with respect to the inputs x1, x2, w.
"""
# This is missing a factor of exp(-logdet_max1-logdet_max2), which is
# instead picked up by including it in the terms adj1*det2 and adj2*det1
glog_out = grad_log / output
dout = tf.matmul(glog_out, w, transpose_b=True)
if x1.shape[-1] > 1:
adj1 = cofactor(u1, s1, v1, extend(logdet_max1, s1)) * sign1[..., None,
None]
else:
adj1 = tf.ones_like(x1) * extend(tf.exp(-logdet_max1), x1)
if x2.shape[-1] > 1:
adj2 = cofactor(u2, s2, v2, extend(logdet_max2, s2)) * sign2[..., None,
None]
else:
adj2 = tf.ones_like(x2) * extend(tf.exp(-logdet_max2), x2)
det1 = tf.exp(logdet1 - logdet_max1) * sign1
det2 = tf.exp(logdet2 - logdet_max2) * sign2
dx1 = adj1 * (det2 * dout)[..., None, None]
dx2 = adj2 * (det1 * dout)[..., None, None]
dw = tf.matmul(det, glog_out, transpose_a=True)
def grad(grad_dx1, grad_dx2, grad_dw):
"""Stable gradient of gradient of log(abs(sum_i w_i |x1_i| * |x2_i|)).
Args:
grad_dx1: Tensor of shape [batch size, dets in, n, n].
grad_dx2: Tensor of shape [batch size, dets in, m, m].
grad_dw: Tensor of shape [dets in, dets out].
Returns:
Ops that compute the gradient of the gradient of
log(abs(matmul(det(x1) * det(x2), w))). The ops give the gradients
with respect to the outputs of the gradient op dx1, dx2, dw and the
reverse sensitivity grad_log.
"""
# Terms that appear repeatedly in different gradients.
det_grad_dw = tf.matmul(det, grad_dw)
# Missing a factor of exp(-2*logdet_max1 - 2*logdet_max2), which is included
# via terms involving r1, r2, det1, det2, adj1 and adj2.
glog_out2 = grad_log / (output**2)
adj_dx1 = tf.reduce_sum(adj1 * grad_dx1, axis=[-1, -2])
adj_dx2 = tf.reduce_sum(adj2 * grad_dx2, axis=[-1, -2])
adj_dx1_det2 = det2 * adj_dx1
adj_dx2_det1 = det1 * adj_dx2
adj_dx = adj_dx2_det1 + adj_dx1_det2
if x1.shape[-1] > 1:
r1 = rho(s1, logdet_max1)
dadj1 = (grad_cofactor(u1, v1, r1, grad_dx1) * sign1[..., None, None])
else:
dadj1 = tf.zeros_like(x1)
if x2.shape[-1] > 1:
r2 = rho(s2, logdet_max2)
dadj2 = (grad_cofactor(u2, v2, r2, grad_dx2) * sign2[..., None, None])
else:
dadj2 = tf.zeros_like(x2)
# Computes gradients wrt x1 and x2.
ddout_w = (
tf.matmul(glog_out, grad_dw, transpose_b=True) -
tf.matmul(glog_out2 * det_grad_dw, w, transpose_b=True))
ddout_x = tf.matmul(
glog_out2 * tf.matmul(adj_dx, w), w, transpose_b=True)
grad_x1 = (
adj1 * (det2 *
(ddout_w - ddout_x) + adj_dx2 * dout)[..., None, None] +
dadj1 * (dout * det2)[..., None, None])
grad_x2 = (
adj2 * (det1 *
(ddout_w - ddout_x) + adj_dx1 * dout)[..., None, None] +
dadj2 * (dout * det1)[..., None, None])
adj_dx_w = tf.matmul(adj_dx, w)
# Computes gradient wrt w.
grad_w = (
tf.matmul(adj_dx, glog_out, transpose_a=True) - tf.matmul(
det, glog_out2 * (det_grad_dw + adj_dx_w), transpose_a=True))
# Computes gradient wrt grad_log.
grad_grad_log = (det_grad_dw + adj_dx_w) / output
return grad_x1, grad_x2, grad_w, grad_grad_log
return (dx1, dx2, dw), grad
def grad(grad_log, grad_sign):
"""Computes gradient wrt log(w*det(x1)*det(x2))."""
del grad_sign
return logdet_matmul_grad(x1, x2, w, grad_log)
return (log_out, sign_out), grad
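# Illustrative numpy sketch, for exposition only, of the quantity logdet_matmul
# computes when numerical stability is ignored:
#   output = matmul(det(x1) * det(x2), w), returned as (log|output|, sign).
def _example_logdet_matmul_naive():
  import numpy as np  # pylint: disable=g-import-not-at-top
  x1 = np.random.randn(2, 3, 4, 4)  # [batch, dets_in, n, n]
  x2 = np.random.randn(2, 3, 2, 2)  # [batch, dets_in, m, m]
  w = np.random.randn(3, 1)         # [dets_in, dets_out]
  det = np.linalg.det(x1) * np.linalg.det(x2)  # [batch, dets_in]
  output = det @ w                             # [batch, dets_out]
  return np.log(np.abs(output)), np.sign(output)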
class TensorMlp(snt.AbstractModule):
"""Regular MLP, but works with rank-2, -3, or -4 tensors as input."""
def __init__(self,
layers,
rank=0,
use_bias=True,
activate_final=True,
residual=False,
gain=1.0,
stddev=1.0,
name='tensor_mlp'):
"""Create an MLP with different input ranks.
Args:
layers: list of ints, with number of units in each layer
rank: Rank of the input, not counting batch or feature dimension. If 0,
creates a normal MLP. If 1, uses 1D convolutions with kernel size 1. If
2, uses 2D convolutions with 1x1 kernel.
use_bias: If false, leave bias out of linear layers.
activate_final: Boolean. If true, add nonlinearity to last layer.
residual: use residual connections in each layer.
gain: gain used in orthogonal initializer for weights in each linear
layer.
stddev: standard deviation used in random normal initializer for biases in
each linear layer.
name: String. Sonnet default.
Raises:
ValueError: if the rank is not 0, 1 or 2.
"""
super(TensorMlp, self).__init__(name=name)
initializers = {'w': tf.initializers.orthogonal(gain=gain)}
if use_bias:
initializers['b'] = tf.initializers.random_normal(stddev=stddev)
if rank == 0:
self._linear = lambda x: snt.Linear(
x, use_bias=use_bias, initializers=initializers)
elif rank == 1:
self._linear = lambda x: snt.Conv1D(
output_channels=x,
kernel_shape=1,
use_bias=use_bias,
initializers=initializers)
elif rank == 2:
self._linear = lambda x: snt.Conv2D(
output_channels=x,
kernel_shape=1,
use_bias=use_bias,
initializers=initializers)
else:
raise ValueError('Rank for TensorMlp must be 0, 1 or 2.')
self._rank = rank
self._nlayers = layers
self._use_bias = use_bias
self._activate_final = activate_final
self._use_residual = residual
def _build(self, inputs, layer_collection=None):
if self._nlayers:
x = inputs
self._layers = []
for i, h in enumerate(self._nlayers):
self._layers.append(self._linear(h))
y = self._layers[-1](x)
if layer_collection:
if self._use_bias:
params = (self._layers[-1].w, self._layers[-1].b)
else:
params = self._layers[-1].w
if self._rank == 0:
layer_collection.register_fully_connected(params, x, y, reuse=False)
elif self._rank == 1:
layer_collection.register_conv1d(
params, [1, 1, 1], 'SAME', x, y, reuse=False)
else:
layer_collection.register_conv2d(
params, [1, 1, 1, 1], 'SAME', x, y, reuse=False)
if i < len(self._nlayers) - 1 or self._activate_final:
y = tf.nn.tanh(y)
if self._use_residual and x.shape[-1].value == h:
residual = x
else:
residual = 0.0
x = y + residual
return x
return inputs # if layers is an empty list, just act as the identity
class ParallelNet(snt.AbstractModule):
"""A symmetric network with parallel streams for rank 1 and 2 features.
Rank 2 features propagate through parallel, independent channels.
Rank 1 features receive input from the other rank 1 channels and from the
rank 2 stream.
Attributes:
shape: number of alpha and beta electrons.
layers: iterable of integer pairs describing the layers, i.e. tuples of
  (num_rank_one_features, num_rank_two_features).
use_last_layer: bool. whether or not to return rank-2 features at the end of
  the net.
residual: bool. whether or not to use residual connections.
name: for Sonnet namespacing.
"""
def __init__(self,
shape,
layers,
use_last_layer=True,
residual=False,
name='parallel_net'):
super(ParallelNet, self).__init__(name=name)
self._shape = shape # tuple of input shapes
self._n = sum(shape)
self._layers = layers
self._use_last_layer = use_last_layer
self._use_residual = residual
def _build(self, x1, x2, r_ee, layer_collection=None):
"""Construct the parallel network from rank 1 and rank 2 inputs.
Args:
x1: rank 1 input features. tensor of shape (batch, n_electrons,
n_rank_one_inputs)
x2: rank 2 input features. tensor of shape (batch, n_electrons,
n_electrons, n_rank_two_inputs)
r_ee: distances between electrons. tensor of shape (batch, n_electrons,
n_electrons)
layer_collection: KFAC layer collection or None.
Returns:
x1: rank 1 output features.
tensor of shape (batch, n_electrons, n_rank_one_outputs)
x2: rank 2 output features or None if use_last_layer is False.
if not None tensor is of shape (batch, n_electrons, n_rank_two_outputs)
"""
self._w1 = []
self._w2 = []
initializers = {
'w': tf.initializers.orthogonal(),
'b': tf.initializers.random_normal()
}
for i, (nc1, nc2) in enumerate(self._layers):
xs1 = tf.split(x1, self._shape, axis=1)
xs2 = tf.split(x2, self._shape, axis=1)
# mean field features
mf1 = [
tf.tile(
tf.reduce_mean(x, axis=1, keepdims=True), [1, x1.shape[1], 1])
for x in xs1
]
mf2 = [tf.reduce_mean(x, axis=1) for x in xs2]
y1 = tf.concat([x1] + mf1 + mf2, axis=2)
self._w1.append(
snt.Conv1D(
output_channels=nc1, kernel_shape=1, initializers=initializers))
out1 = self._w1[-1](y1)
if layer_collection:
layer_collection.register_conv1d((self._w1[-1].w, self._w1[-1].b),
[1, 1, 1],
'SAME',
y1,
out1,
reuse=False)
# Implements residual connection
if self._use_residual and x1.shape[-1].value == nc1:
res = x1
else:
res = 0.0
x1 = tf.nn.tanh(out1) + res
# If we don't need the last layer of rank-2 features, set nc2 to None
if i < len(self._layers) - 1 or self._use_last_layer:
self._w2.append(
snt.Conv2D(
output_channels=nc2, kernel_shape=1, initializers=initializers))
out2 = self._w2[-1](x2)
if layer_collection:
layer_collection.register_conv2d((self._w2[-1].w, self._w2[-1].b),
[1, 1, 1, 1],
'SAME',
x2,
out2,
reuse=False)
x2 = tf.nn.tanh(out2)
else:
x2 = None
return x1, x2
class EquivariantNet(snt.AbstractModule):
"""A symmetric network like ParallelNet, but without the rank 2 stream."""
def __init__(self, shape, layers, residual=False, name='parallel_net'):
super(EquivariantNet, self).__init__(name=name)
self._shape = shape # tuple of input shapes
self._n = sum(shape)
self._layers = layers
self._use_residual = residual
def _build(self, x1, layer_collection=None):
self._w1 = []
initializers = {
'w': tf.initializers.orthogonal(),
'b': tf.initializers.random_normal()
}
for nc1 in self._layers:
xs1 = tf.split(x1, self._shape, axis=1)
# mean field features
mf1 = [
tf.tile(
tf.reduce_mean(x, axis=1, keepdims=True), [1, x1.shape[1], 1])
for x in xs1
]
y1 = tf.concat([x1] + mf1, axis=2)
self._w1.append(
snt.Conv1D(
output_channels=nc1, kernel_shape=1, initializers=initializers))
out1 = self._w1[-1](y1)
if layer_collection:
layer_collection.register_conv1d((self._w1[-1].w, self._w1[-1].b),
[1, 1, 1],
'SAME',
y1,
out1,
reuse=False)
# Implements residual connection
if self._use_residual and x1.shape[-1].value == nc1:
res = x1
else:
res = 0.0
x1 = tf.nn.tanh(out1) + res
return x1
class LogMatmul(snt.AbstractModule):
"""Takes log-domain vector and performs numerically stable matmul."""
def __init__(self, outputs, use_bias=False, name='log_matmul'):
super(LogMatmul, self).__init__(name=name)
self._outputs = outputs
self._use_bias = use_bias
def _build(self, log_input, sign_input, layer_collection=None):
rank = log_input.shape.ndims
if rank == 2:
self._layer = snt.Linear(self._outputs, use_bias=self._use_bias)
elif rank == 3:
self._layer = snt.Conv1D(
output_channels=self._outputs,
kernel_shape=1,
use_bias=self._use_bias)
else:
raise RuntimeError('LogMatmul only supports rank 2 and 3 tensors.')
log_max = tf.reduce_max(log_input, axis=-1, keepdims=True)
inputs = tf.exp(log_input - log_max) * sign_input
output = self._layer(inputs)
sign_output = tf.sign(output)
log_output = tf.log(tf.abs(output)) + log_max
if layer_collection:
if self._use_bias:
params = (self._layer.w, self._layer.b)
else:
params = self._layer.w
if rank == 2:
layer_collection.register_fully_connected(
params, inputs, output, reuse=False)
elif rank == 3:
layer_collection.register_conv1d(
params, [1, 1, 1], 'SAME', inputs, output, reuse=False)
return log_output, sign_output
class Envelope(snt.AbstractModule):
"""Compute exponentially-decaying envelope to enforce boundary conditions."""
def __init__(self, atoms, shape, determinants, soft=False, name='envelope'):
r"""Compute exponentially-decaying envelope to enforce boundary conditions.
Given a set of electron positions r_i and atom positions z_j, this module
learns a set of Mahalonobis distance metrics A_jk and mixing proportions
\pi_jk and computes a set of exponentially-decaying envelopes of the form:
f_k(r_i) =
\sum_j \pi_jk exp(-sqrt((r_i - z_j)^T * A_jk^T * A_jk * (r_i - z_j)))
where the index `k` is over all orbitals, both spin up and spin down.
Each of these envelopes is multiplied by one of the orbitals in a Fermi Net
to enforce the boundary condition that the wavefunction goes to zero at
infinity.
Args:
atoms: list of atom positions
shape: tuple with number of spin up and spin down electrons
determinants: number of determinants, used to compute number of envelopes
soft: If true, use sqrt(1 + sum(x**2)) instead of norm(x)
name: Sonnet boilerplate
Returns:
list of envelope ops, one spin up and one for spin down orbitals
"""
super(Envelope, self).__init__(name=name)
self._atoms = atoms
self._shape = shape
self._na = len(atoms)
self._nd = determinants
self._soft = soft # remove cusp from envelope
def _build(self, inputs, layer_collection=None):
"""Create ops to compute envelope for every electron/orbital pair.
Args:
inputs: tensor of shape (batch size, nelectrons, 3) with position of every
electron
layer_collection (optional): if using KFAC, the layer collection to
register ops with.
Returns:
List of the value of the envelope for spin up and spin down electrons.
Each element of the list is a tensorflow op with size
(batch, shape[i], norbitals) where shape[i] is the number of electrons of
spin "i", and norbitals is shape[i] * the number of determinants.
"""
# shape of each (atom/orbital) envelope
self._envelope_sigma = []
# mixing proportion for all atoms for a given orbital
self._envelope_pi = []
self._envelopes = []
norms = [[] for _ in self._shape]
pos = tf.stack([
tf.constant(atom.coords, dtype=inputs.dtype.base_dtype)
for atom in self._atoms
])
pos = tf.expand_dims(tf.expand_dims(pos, 0), 0)
diff_ae = tf.expand_dims(inputs, 2) - pos
# Split difference from electron to atom into list, one per atom, then
# further split each element into a list, one for spin up electrons, one
# for spin down.
diff_aes = [
tf.split(x, self._shape, axis=1)
for x in tf.split(diff_ae, self._na, axis=2)
]
# Compute the weighted distances from electron to atom
# We swap the normal convention of `i` for the outer for loop and `j` for
# the inner for loop to conform to the notation in the docstring.
for j in range(self._na): # iterate over atoms
self._envelope_sigma.append([])
for i in range(len(self._shape)): # iterate over spin up/down electrons
norbital = self._shape[i] * self._nd
eyes = tf.transpose(
tf.reshape(
tf.eye(
3, batch_shape=[norbital], dtype=inputs.dtype.base_dtype),
[1, 1, norbital * 3, 3]), (0, 1, 3, 2))
self._envelope_sigma[j].append(
tf.get_variable(
'envelope_sigma_%d_%d' % (j, i),
initializer=eyes,
use_resource=True,
trainable=True,
dtype=inputs.dtype.base_dtype,
))
# multiply the difference from the electron to the atom by an
# anisotropic weight - one 3x3 matrix per orbital function
# From the notation in the docstring, `rout` is
# equivalent to (A_j1; ...; A_jk) * (r_i - z_j)
# Note, snt.Conv2D won't take bare TF ops as initializers, so we just
# use tf.nn.conv2d here instead.
rout = tf.nn.conv2d(diff_aes[j][i], self._envelope_sigma[j][i],
[1, 1, 1, 1], 'SAME')
if layer_collection:
layer_collection.register_conv2d(
self._envelope_sigma[j][i], [1, 1, 1, 1],
'SAME',
diff_aes[j][i],
rout,
reuse=False)
# Reshape `rout` so that rout[:, i, k, :] represents
# A_jk * (r_i - z_j) for all r_i in the batch
rout = tf.reshape(rout, [rout.shape[0], rout.shape[1], norbital, 3])
if self._soft:
norms[i].append(
tf.sqrt(tf.reduce_sum(1 + rout**2, axis=3, keepdims=True)))
else:
norms[i].append(tf.norm(rout, axis=3, keepdims=True))
# Compute the mixing proportions for different atoms
for i in range(len(self._shape)):
norbital = self._shape[i] * self._nd
self._envelope_pi.append(
tf.get_variable(
'envelope_pi_%d' % i,
initializer=tf.ones([norbital, self._na],
dtype=inputs.dtype.base_dtype),
use_resource=True,
trainable=True,
dtype=inputs.dtype.base_dtype,
))
if layer_collection:
layer_collection.register_generic(
self._envelope_pi[i], inputs.shape[0].value, reuse=False)
# Take the exponential of the Mahalonobis distance and multiply each
# term by the weight for that atom.
norm = tf.concat(norms[i], axis=3)
mixed_norms = tf.exp(-norm) * self._envelope_pi[i]
# Add the weighted terms for each atom together to form the envelope.
# Each term in this list has shape (batch size, shape[i], norbitals)
# where shape[i] is the number of electrons of spin `i`. This computes
# the value of the envelope for every combination of electrons and
# orbitals.
self._envelopes.append(tf.reduce_sum(mixed_norms, axis=3))
return self._envelopes
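# Illustrative numpy sketch, for exposition only, of the envelope value for a
# single electron and a single orbital, following the formula in the Envelope
# docstring. The argument names and shapes here are assumptions.
def _example_envelope_formula(r, atom_positions, a_matrices, pis):
  """r: [3]; atom_positions: [n_atoms, 3]; a_matrices: [n_atoms, 3, 3]; pis: [n_atoms]."""
  import numpy as np  # pylint: disable=g-import-not-at-top
  diffs = r - atom_positions                                # [n_atoms, 3]
  mahal = np.linalg.norm(
      np.einsum('jab,jb->ja', a_matrices, diffs), axis=-1)  # [n_atoms]
  return np.sum(pis * np.exp(-mahal))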
class BackFlow(snt.AbstractModule):
"""Introduce correlation in Slater network via coordinate transformations.
Electron-electron, electron-nuclear and electron-electron-nuclear
contributions are computed using MLPs for the nu, mu, theta and phi
functions.
"""
def _build(self, xs, xsa, r_en, r_ee, layer_collection=None):
# Notation is explained in the CASINO manual, section 23.1
# (https://casinoqmc.net/casino_manual_dir/casino_manual.pdf)
backflow_ee = TensorMlp([64, 64, 64, 1],
rank=2,
activate_final=False,
residual=True,
gain=1.e-3,
stddev=1.e-3)
backflow_en = TensorMlp([64, 64, 64, 1],
rank=2,
activate_final=False,
residual=True,
gain=1.e-3,
stddev=1.e-3)
backflow_een = TensorMlp([64, 64, 64, 2],
rank=2,
activate_final=False,
residual=True,
gain=1.e-3,
stddev=1.e-3)
# electron-electron backflow term
eta = backflow_ee(r_ee, layer_collection=layer_collection)
# electron-nuclear backflow term
r_en = tf.expand_dims(r_en, -1)
mu = backflow_en(r_en, layer_collection=layer_collection)
# The most complicated term: the electron-electron-nuclear backflow term
shape = [r_ee.shape[0], r_ee.shape[1], r_ee.shape[2], r_en.shape[2], 1]
r_een = tf.concat((tf.broadcast_to(tf.expand_dims(
r_ee, -2), shape), tf.broadcast_to(tf.expand_dims(r_en, -3), shape),
tf.broadcast_to(tf.expand_dims(r_en, -4), shape)),
axis=-1)
# Reshape so we can squeeze this into a rank-2 network.
r_een_flat = tf.reshape(r_een, [
r_een.shape[0], r_een.shape[1], r_een.shape[2] * r_een.shape[3],
r_een.shape[4]
])
phi_theta = backflow_een(r_een_flat, layer_collection=layer_collection)
phi = tf.reshape(phi_theta[..., 0], r_een.shape[:-1] + [1])
theta = tf.reshape(phi_theta[..., 1], r_een.shape[:-1] + [1])
diffs = tf.expand_dims(xs, 1) - tf.expand_dims(xs, 2)
backflow = (
tf.reduce_sum(eta * diffs, axis=2) + tf.reduce_sum(mu * xsa, axis=2) +
tf.reduce_sum(phi * tf.expand_dims(diffs, axis=3), axis=[2, 3]) +
tf.reduce_sum(theta * tf.expand_dims(xsa, axis=2), axis=[2, 3]))
return backflow
class FermiNet(snt.AbstractModule):
"""Neural network with a determinant layer to antisymmetrize output."""
def __init__(self,
*,
atoms,
nelectrons,
slater_dets,
hidden_units,
after_det,
dim=3,
architecture='ferminet',
envelope=False,
residual=False,
r12_ee_features=True,
r12_en_features=True,
pos_ee_features=True,
build_backflow=False,
use_backflow=False,
jastrow_en=False,
jastrow_ee=False,
jastrow_een=False,
logdet=False,
pretrain_iterations=2,
name='det_net'):
super(FermiNet, self).__init__(name=name)
self._atoms = atoms
self._dim = dim
# tuple of number of (spin up, spin down) e's or a tuple of (e's) for
# spin-polarised systems.
self._shape = [nelec for nelec in nelectrons if nelec > 0]
self._ne = sum(nelectrons) # total number of electrons
self._na = len(atoms) # number of atoms
self._nd = slater_dets # number of determinants
self._architecture = architecture
self._hidden_units = hidden_units
self._after_det = after_det
self._add_envelope = envelope
self._r12_ee = r12_ee_features
self._r12_en = r12_en_features
self._pos_ee = pos_ee_features
self._use_residual = residual
self._build_backflow = build_backflow
self._use_backflow = use_backflow
self._use_jastrow_en = jastrow_en
self._use_jastrow_ee = jastrow_ee
self._use_jastrow_een = jastrow_een
self._output_log = logdet # Whether to represent wavef'n or log-wavef'n
self.pretrain_iterations = pretrain_iterations
def _features(self, xs, r_ee, r_en, xsa):
x1 = []
x2 = []
if self._r12_en:
x1.append(r_en)
if self._r12_ee:
x2.append(r_ee)
xsa = tf.reshape(xsa, [xsa.shape[0], xsa.shape[1], -1])
x1.append(xsa)
if self._pos_ee:
diffs = tf.expand_dims(xs, 1) - tf.expand_dims(xs, 2)
x2.append(diffs)
x1 = tf.concat(x1, axis=2) if x1 else None
x2 = tf.concat(x2, axis=3) if x2 else None
return x1, x2
def _build(self, inputs, layer_collection=None, back_prop=True):
self.inputs = inputs
xs = tf.reshape(inputs, [inputs.shape[0], self._ne, -1])
r_en, r_ee, xsa = hamiltonian.r12_features(
inputs,
self._atoms,
self._ne,
keep_pos=True,
flatten=False,
atomic_coords=True)
if self._use_backflow or self._build_backflow:
if self._architecture != 'slater':
raise ValueError('Backflow should only be used with Hartree-Fock/'
'Slater-Jastrow network')
backflow_net = BackFlow()
backflow = backflow_net(xs, xsa, r_en, r_ee, layer_collection)
else:
backflow = None
if backflow is not None:
# Warning: Backflow transformed coordinates should *only* be fed into the
# orbitals and not into the Jastrow factor. Denote functions of the
# backflow as q* accordingly.
q_inputs = inputs + tf.reshape(backflow, inputs.shape)
qs = tf.reshape(q_inputs, [q_inputs.shape[0], self._ne, -1])
q_en, q_ee, qsa = hamiltonian.r12_features(
q_inputs,
self._atoms,
self._ne,
keep_pos=True,
flatten=False,
atomic_coords=True)
x1, x2 = self._features(qs, q_ee, q_en, qsa)
else:
x1, x2 = self._features(xs, r_ee, r_en, xsa)
# Fermi Net or Slater-Jastrow net?
if self._architecture == 'ferminet':
if isinstance(self._hidden_units[0], typing.Iterable):
self._parallel_net = ParallelNet(
self._shape,
self._hidden_units,
residual=self._use_residual,
use_last_layer=False)
self._before_det, self._before_jastrow = self._parallel_net(
x1, x2, r_ee, layer_collection=layer_collection)
else:
self._parallel_net = EquivariantNet(
self._shape, self._hidden_units, residual=self._use_residual)
self._before_det = self._parallel_net(
x1, layer_collection=layer_collection)
self._before_jastrow = None
elif self._architecture == 'slater':
self._before_det_mlp = TensorMlp(
self._hidden_units, rank=1, residual=True)
self._before_det = self._before_det_mlp(
x1, layer_collection=layer_collection)
else:
raise ValueError('Not a recognized architecture: {}'.format(
self._architecture))
# Split the features into spin up and down electrons, and shape into
# orbitals and determinants.
before_det = tf.split(self._before_det, self._shape, axis=1)
self._orbital_layers = [
snt.Conv1D(x.shape[1] * self._nd, 1, name='orbital_%d' % i)
for i, x in enumerate(before_det)
]
orbitals = []
for i, x in enumerate(before_det):
layer = self._orbital_layers[i]
y = layer(x)
orbitals.append(y)
if layer_collection:
layer_collection.register_conv1d((layer.w, layer.b), [1, 1, 1],
'SAME',
x,
y,
reuse=False)
# Create an exponentially-decaying envelope around every atom
if self._add_envelope:
envelope = Envelope(self._atoms, self._shape, self._nd)
self._envelopes = envelope(xs, layer_collection=layer_collection)
for i in range(len(orbitals)):
orbitals[i] = orbitals[i] * self._envelopes[i]
self._orbitals = [
tf.transpose(
tf.reshape(x, [x.shape[0], x.shape[1], -1, x.shape[1]]),
[0, 2, 1, 3]) for x in orbitals
]
slog1d = lambda x: (tf.sign(x[..., 0, 0]), tf.log(tf.abs(x[..., 0, 0])))
slog = lambda x: log_determinant(x) if x.shape[-1].value > 1 else slog1d(x)
if self._output_log:
if self._nd == 1:
self._slogdets = [
slog(x) for x in self._orbitals
]
self._logdet = tf.add_n([x[1] for x in self._slogdets])
self._signs = functools.reduce(
tf.Tensor.__mul__, # pytype: disable=unsupported-operands
[x[0] for x in self._slogdets])
output = (self._logdet, self._signs)
else:
self._after_det_weights = tf.get_variable(
'after_det_weights',
shape=[self._nd, self._after_det[0]],
initializer=tf.initializers.ones(dtype=inputs.dtype.base_dtype),
use_resource=True,
dtype=inputs.dtype.base_dtype,
)
if back_prop:
if len(self._orbitals) == 1:
output = list(slog(x) for x in self._orbitals)
signdet, logdet = output[0]
log_max = tf.reduce_max(logdet, axis=-1, keepdims=True)
inputs = tf.exp(logdet - log_max) * signdet
output = tf.matmul(inputs, self._after_det_weights)
output = tf.log(tf.abs(output)) + log_max, tf.sign(output)
else:
output = logdet_matmul(self._orbitals[0], self._orbitals[1],
self._after_det_weights)
else:
# Compute logdet with tf.linalg.slogdet, as gradients are not needed
# We keep this outside logdet_matmul, despite the code duplication,
# because tf.custom_gradient decorator does not like keyword args
if len(self._orbitals) == 1:
signdet, logdet = tf.linalg.slogdet(self._orbitals[0])
else:
slogdet0 = tf.linalg.slogdet(self._orbitals[0])
slogdet1 = tf.linalg.slogdet(self._orbitals[1])
signdet = slogdet0[0] * slogdet1[0]
logdet = slogdet0[1] + slogdet1[1]
log_max = tf.reduce_max(logdet, axis=-1, keepdims=True)
inputs = tf.exp(logdet - log_max) * signdet
output = tf.matmul(inputs, self._after_det_weights)
output = tf.log(tf.abs(output)) + log_max, tf.sign(output)
self._logdet = output[0]
if layer_collection:
layer_collection.register_generic(
self._after_det_weights,
self._orbitals[0].shape[0].value,
reuse=False)
if len(self._after_det) > 1:
log_out, sign_out = output
# Constructing x and -x to feed through network to make odd function
output = (tf.stack((log_out, log_out),
axis=1), tf.stack((sign_out, -1 * sign_out),
axis=1))
for nout in self._after_det[1:]:
layer = LogMatmul(nout, use_bias=True)
# apply ReLU in log domain
output = (tf.where(
tf.greater(output[1], 0.0), output[0],
-100 * tf.ones_like(output[0])), output[1])
output = layer(*output, layer_collection=layer_collection)
# Make output an odd function
log_out, sign_out = output
logs_out = [
tf.squeeze(out, axis=1) for out in tf.split(log_out, 2, axis=1)
]
signs_out = [
tf.squeeze(out, axis=1) for out in tf.split(sign_out, 2, axis=1)
]
log_max = tf.maximum(
tf.reduce_max(logs_out[0], axis=1, keepdims=True),
tf.reduce_max(logs_out[1], axis=1, keepdims=True))
real_out = (
tf.exp(logs_out[0] - log_max) * signs_out[0] -
tf.exp(logs_out[1] - log_max) * signs_out[1])
output = (tf.log(tf.abs(real_out)) + log_max, tf.sign(real_out))
else:
self._dets = [
determinant(x) if x.shape[-1].value > 1 else x[..., 0, 0]
for x in self._orbitals
]
self._det = functools.reduce(tf.Tensor.__mul__, self._dets) # pytype: disable=unsupported-operands
after_det_input = self._det
self._after_det_mlp = TensorMlp(self._after_det, use_bias=True)
combined_input = tf.concat([after_det_input, -after_det_input], axis=0)
combined_output = self._after_det_mlp(
combined_input, layer_collection=layer_collection)
output_0, output_1 = tf.split(combined_output, 2, axis=0)
output = output_0 - output_1
total_jastrow = 0.0
if self._use_jastrow_ee:
self._jastrow_ee_net = TensorMlp(
[64, 64, 64, len(self._shape)**2],
rank=2,
activate_final=False,
residual=True)
# To prevent NaNs, rescale factor by number of electrons squared.
self._jastrow_ee = self._jastrow_ee_net(
r_ee, layer_collection=layer_collection) / (
self._ne**2)
# split output into channels for different spin interactions
jastrow_ee = flatten([
tf.split(x, self._shape, axis=2)
for x in tf.split(self._jastrow_ee, self._shape, axis=1)
])
# For each spin interaction (alpha/alpha, alpha/beta, beta/alpha,
# beta/beta), get just the Jastrow term for that interaction
jastrow_ee = [x[..., i:i + 1] for i, x in enumerate(jastrow_ee)]
# Add up all Jastrow terms for all pairs of electrons over all spins
jastrow_ee = tf.add_n([tf.reduce_sum(x, axis=[1, 2]) for x in jastrow_ee])
total_jastrow += jastrow_ee
if self._use_jastrow_en:
self._jastrow_en_net = TensorMlp([64, 64, 64, self._na],
rank=2,
activate_final=False,
residual=True)
self._jastrow_en = (
self._jastrow_en_net(
r_en[..., None], layer_collection=layer_collection) / self._ne /
self._na)
# We have one output per atom/electron distance. Rather than having one
# network for every atom, we have one network with many outputs, and take
# the diagonal of the outputs.
jastrow_en = tf.reduce_sum(tf.matrix_diag_part(self._jastrow_en), axis=2)
jastrow_en = tf.reduce_sum(jastrow_en, axis=1, keepdims=True)
total_jastrow += jastrow_en
if self._use_jastrow_een:
self._jastrow_een_nets = []
self._jastrow_een = []
for i in range(self._na):
# Create a separate network for every atom. Not efficient, but works
# well enough for the experiments we need this for. This is only for
# comparison against benchmarks in the literature and is not meant to
# be a default component of the Fermionic Neural Network.
self._jastrow_een_nets.append(
TensorMlp([64, 64, 64, len(self._shape)**2],
rank=2,
activate_final=False,
residual=True))
batch = tf.concat((r_ee,
tf.tile(
tf.expand_dims(r_en[..., i:i + 1], axis=1),
[1, self._ne, 1, 1]),
tf.tile(
tf.expand_dims(r_en[..., i:i + 1], axis=2),
[1, 1, self._ne, 1])),
axis=-1)
self._jastrow_een.append(self._jastrow_een_nets[i](
batch, layer_collection=layer_collection) / (self._ne**2))
# split output into channels for different spin interactions
jastrow_een = flatten([
tf.split(x, self._shape, axis=2)
for x in tf.split(self._jastrow_een[i], self._shape, axis=1)
])
# For each spin interaction (alpha/alpha, alpha/beta, beta/alpha,
# beta/beta), get just the Jastrow term for that interaction
jastrow_een = [x[..., i:i + 1] for i, x in enumerate(jastrow_een)]
# Add up all Jastrow terms for all pairs of electrons over all spins
jastrow_een = tf.add_n(
[tf.reduce_sum(x, axis=[1, 2]) for x in jastrow_een])
total_jastrow += jastrow_een
if self._output_log:
output = (output[0] - total_jastrow, output[1])
else:
output = output * tf.exp(-total_jastrow)
self.output = output
return self.output
def pretrain_hartree_fock(network, data, strategy, hf_approx):
"""Pretrain network so orbitals are nearly uncorrelated.
Args:
network: network to be pretrained. Note: only the network._orbitals tensor
is pretrained.
data: (nreplicas, batch, spatial_dim * nelectrons) Tensor. Input to network.
strategy: DistributionStrategy instance.
hf_approx: Instance of Scf class containing Hartree-Fock approximation.
Returns:
hf_loss: Distance between orbitals and Hartree-Fock orbitals. Minimised each
iteration by tf.AdamOptimizer.
"""
with strategy.scope():
optim = tf.train.AdamOptimizer(name='Adam_pretrain')
def pretrain_step_fn(walkers):
"""Step function for pretraining."""
replica_id = tf.distribute.get_replica_context().replica_id_in_sync_group
data = walkers[replica_id]
network(data)
xs = tf.reshape(data, [int(data.shape[0]), sum(hf_approx.nelectrons), -1])
hf_mats = hf_approx.tf_eval_hf(xs, deriv=True)
# pylint: disable=protected-access
diffs = [
tf.expand_dims(hf, 1) - nn
for (hf, nn) in zip(hf_mats, network._orbitals)
]
# pylint: enable=protected-access
mses = [tf.reduce_mean(tf.square(diff)) for diff in diffs]
if len(mses) > 1:
mse_loss = 0.5 * tf.add(*mses)
else:
mse_loss = 0.5 * mses[0]
pretrain_op = optim.minimize(mse_loss)
with tf.control_dependencies([pretrain_op]):
mse_loss = tf.identity(mse_loss)
return mse_loss
mse_loss = strategy.experimental_run(
functools.partial(pretrain_step_fn, walkers=data))
return strategy.reduce(tf.distribute.ReduceOp.MEAN, mse_loss)
| ferminet-master | ferminet/networks.py |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Markov Chain Monte Carlo data generator."""
import functools
import tensorflow.compat.v1 as tf
class MCMC:
"""Simple Markov Chain Monte Carlo implementation for sampling from |f|^2."""
def __init__(self,
network,
batch_size,
init_mu,
init_sigma,
move_sigma,
dtype=tf.float32):
"""Constructs MCMC object.
Args:
network: network to sample from according to the square of the output.
batch_size: batch size - number of configurations (walkers) to generate.
init_mu: mean of the truncated normal distribution to draw from to generate
  initial configurations, one value per coordinate (i.e. of length 3 x number
  of electrons).
init_sigma: standard deviation of a truncated normal distribution to draw
from to generate initial configurations.
move_sigma: standard deviation of random normal distribution from which to
draw random moves.
dtype: the data type of the configurations.
"""
strategy = tf.distribute.get_strategy()
nreplicas = strategy.num_replicas_in_sync
self._dtype = dtype.base_dtype
self.network = network
self._init_mu = init_mu
self._init_sigma = init_sigma
self._move_sigma = move_sigma
self.walkers = tf.get_variable(
'walkers',
initializer=self._rand_init((nreplicas, batch_size)),
trainable=False,
use_resource=True,
dtype=self._dtype)
self.psi = self.update_psi()
self.move_acc = tf.constant(0.0)
def _rand_init(self, batch_dims):
"""Generate a random set of samples from a uniform distribution."""
return tf.concat(
[
tf.random.truncated_normal( # pylint: disable=g-complex-comprehension
shape=(*batch_dims, 1),
mean=mu,
stddev=self._init_sigma,
dtype=self._dtype) for mu in self._init_mu
],
axis=-1)
def reinitialize_walkers(self):
walker_reset = self.walkers.assign(
self._rand_init(self.walkers.shape.as_list()[:-1]))
with tf.control_dependencies([walker_reset]):
psi_reset = self.update_psi()
return walker_reset, psi_reset
def update_psi(self):
strategy = tf.distribute.get_strategy()
psi_per_gpu = strategy.experimental_run(
lambda: self.network(self.walkers_per_gpu)[0])
self.psi = tf.stack(
strategy.experimental_local_results(psi_per_gpu), axis=0)
return self.psi
@property
def walkers_per_gpu(self):
replica_id = tf.distribute.get_replica_context().replica_id_in_sync_group
return self.walkers[replica_id]
@property
def psi_per_gpu(self):
replica_id = tf.distribute.get_replica_context().replica_id_in_sync_group
return self.psi[replica_id]
def step(self):
"""Returns ops for one Metropolis-Hastings step across all replicas."""
strategy = tf.distribute.get_strategy()
walkers, psi, move_acc = strategy.experimental_run(
functools.partial(self._mh_step, step_size=self._move_sigma))
update_walkers = self.walkers.assign(
tf.stack(strategy.experimental_local_results(walkers), axis=0))
self.psi = tf.stack(strategy.experimental_local_results(psi), axis=0)
self.move_acc = tf.reduce_mean(
strategy.experimental_local_results(move_acc))
return update_walkers, self.psi, self.move_acc
def _mh_step(self, step_size):
"""Returns ops for one Metropolis-Hastings step in a replica context."""
walkers, psi = self.walkers_per_gpu, self.psi_per_gpu
new_walkers = walkers + tf.random.normal(
shape=walkers.shape, stddev=step_size, dtype=self._dtype)
new_psi = self.network(new_walkers)[0]
pmove = tf.squeeze(2 * (new_psi - psi))
pacc = tf.log(
tf.random_uniform(shape=walkers.shape.as_list()[:1], dtype=self._dtype))
decision = tf.less(pacc, pmove)
with tf.control_dependencies([decision]):
new_walkers = tf.where(decision, new_walkers, walkers)
new_psi = tf.where(decision, new_psi, psi)
move_acc = tf.reduce_mean(tf.cast(decision, tf.float32))
return new_walkers, new_psi, move_acc
def stats_ops(self, log_walkers=False):
"""Returns op for evaluating the move acceptance probability.
Args:
log_walkers: also include the complete walker configurations (slow, lots
of data).
"""
stats = {'pmove': self.move_acc}
if log_walkers:
stats['walkers'] = self.walkers
return stats
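# Illustrative usage sketch (not part of the original module). It assumes a
# toy wavefunction callable returning (log|psi|, sign), which is the interface
# the MCMC class expects, and runs a few Metropolis-Hastings steps under the
# default (single-device) distribution strategy in graph mode. Only this
# module's own imports (functools, tf) plus the class above are used.
def _mcmc_usage_sketch(num_steps=10):
  def toy_network(x):
    # Hydrogen-like 1s state for a single electron: log|psi| = -|r|, sign = +1.
    logpsi = -tf.norm(x, axis=1, keepdims=True)
    return logpsi, tf.ones_like(logpsi)

  sampler = MCMC(toy_network, batch_size=128, init_mu=(0.0, 0.0, 0.0),
                 init_sigma=1.0, move_sigma=0.2)
  step = sampler.step()
  with tf.train.MonitoredSession() as session:
    for _ in range(num_steps):
      _, _, pmove = session.run(step)
  # Fraction of accepted moves in the final step.
  return pmove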
| ferminet-master | ferminet/mcmc.py |
# Lint as: python3
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Top-level QMC training."""
import functools
import os
import shutil
from typing import Iterable, Optional, Text
from absl import flags
from absl import logging
from ferminet import networks
from ferminet.utils import writers
import kfac
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from tensorflow.core.protobuf import rewriter_config_pb2
FLAGS = flags.FLAGS
class QMC:
"""Quantum Monte Carlo on a Fermi Net."""
def __init__(
self,
hamiltonian,
network,
data_gen,
hf_data_gen,
clip_el: Optional[float] = None,
check_loss: bool = False,
):
"""Constructs a QMC object for training a Fermi Net.
Args:
hamiltonian: Hamiltonian operators. See hamiltonian.py
network: network to represent the wavefunction, psi.
data_gen: mcmc.MCMC object that samples configurations from
psi^2, where psi is the output of the network.
hf_data_gen: mcmc.MCMC object that samples configurations from
phi^2, where phi is a Hartree-Fock state of the Hamiltonian.
clip_el: If not None, sets the scale at which to clip the local energy.
Note the local energy is only clipped in the grad_loss passed back to
the gradient, not in the local_energy op itself. The width of the window
in which the local energy is not clipped is a multiple of the total
variation from the median, which is the l1-norm analogue of the standard
deviation. This makes the clipping robust to outliers, which is the
entire point of clipping.
check_loss: If True, check the loss for NaNs and raise an error if found.
"""
self.hamiltonian = hamiltonian
self.data_gen = data_gen
self.hf_data_gen = hf_data_gen
self._clip_el = clip_el
self._check_loss = check_loss
self.network = network
def _qmc_step_fn(self, optimizer_fn, using_kfac, global_step):
"""Training step for network given the MCMC state.
Args:
optimizer_fn: A function which takes as argument a LayerCollection object
(None) if using_kfac is True (False) and returns the optimizer.
using_kfac: True if optimizer_fn creates an instance of kfac.KfacOptimizer
and False otherwise.
global_step: tensorflow op for global step index.
Returns:
loss: per-GPU loss tensor with control dependencies for updating network.
local_energy: local energy for each walker
features: network output for each walker.
Raises:
RuntimeError: If using_kfac is True and optimizer_fn does not create a
kfac.KfacOptimizer instance or the converse.
"""
# Note layer_collection cannot be modified after the KFac optimizer has been
# constructed.
if using_kfac:
layer_collection = kfac.LayerCollection()
else:
layer_collection = None
walkers = self.data_gen.walkers_per_gpu
features, features_sign = self.network(walkers, layer_collection)
optimizer = optimizer_fn(layer_collection)
if bool(using_kfac) != isinstance(optimizer, kfac.KfacOptimizer):
raise RuntimeError(
'optimizer_fn must return a kfac.KfacOptimizer instance if and only '
'if using_kfac is True.')
if layer_collection:
layer_collection.register_squared_error_loss(features, reuse=False)
with tf.name_scope('local_energy'):
kinetic_fn, potential_fn = self.hamiltonian
kinetic = kinetic_fn(features, walkers)
potential = potential_fn(walkers)
local_energy = kinetic + potential
loss = tf.reduce_mean(local_energy)
replica_context = tf.distribute.get_replica_context()
mean_op = tf.distribute.ReduceOp.MEAN
mean_loss = replica_context.merge_call(
lambda strategy, val: strategy.reduce(mean_op, val), args=(loss,))
grad_loss = local_energy - mean_loss
if self._clip_el is not None:
# clip_el should be much larger than 1, to avoid bias
median = tfp.stats.percentile(grad_loss, 50.0)
diff = tf.reduce_mean(tf.abs(grad_loss - median))
grad_loss_clipped = tf.clip_by_value(grad_loss,
median - self._clip_el * diff,
median + self._clip_el * diff)
else:
grad_loss_clipped = grad_loss
with tf.name_scope('step'):
# Create functions which take no arguments and return the ops for applying
# an optimisation step.
if not optimizer:
optimize_step = tf.no_op
else:
optimize_step = functools.partial(
optimizer.minimize,
features,
global_step=global_step,
var_list=self.network.trainable_variables,
grad_loss=grad_loss_clipped)
if self._check_loss:
# Apply optimisation step only if all local energies are well-defined.
step = tf.cond(
tf.reduce_any(tf.math.is_nan(mean_loss)), tf.no_op, optimize_step)
else:
# Faster, but less safe: always apply optimisation step. If the
# gradients are not well-defined (e.g. loss contains a NaN), then the
# network will also be set to NaN.
step = optimize_step()
# A strategy step function must return tensors, not ops so apply a
# control dependency to a dummy op to ensure they're executed.
with tf.control_dependencies([step]):
loss = tf.identity(loss)
return {
'loss': loss,
'local_energies': local_energy,
'features': features,
'features_sign': features_sign
}
def train(self,
optim_fn,
iterations: int,
logging_config,
scf_approx,
strategy,
using_kfac: bool,
prefix: Text = 'fermi_net',
global_step=None,
profile_iterations: Optional[Iterable[int]] = None,
determinism_mode: bool = False,
cached_data_op=None,
write_graph: Optional[Text] = None,
burn_in: int = 0,
mcmc_steps: int = 10):
"""Training loop for VMC with hooks for logging and plotting.
Args:
optim_fn: function with zero arguments (thunk) that returns a class
implementing optimizer (specifically apply_gradients method).
iterations: number of iterations to perform for training.
logging_config: LoggingConfig object defining what and how to log data.
scf_approx: For hf pretraining, object of class Scf, otherwise None.
strategy: DistributionStrategy instance to use for parallelisation. Must
match the strategy used to build the model.
using_kfac: True if optim_fn creates a kfac.KfacOptimizer object, False
otherwise.
prefix: prefix to use for dataframes in bigtable. Appended with _sparse
and _detailed for configuration and training dataframes respectively.
global_step: tensorflow op for global step number. If None set to 0 and
incremented each iteration. Can be used to provide a learning rate
schedule.
profile_iterations: iterable of iteration numbers at which the training
session is to be profiled. Note: data generation step (e.g. MCMC) is not
included in the profile and must be added separately if required.
determinism_mode: activates deterministic computation when in CPU mode.
cached_data_op: Op which updates a variable to cache the current training
batch. Only necessary if using KFAC with adaptive damping.
write_graph: if given, write graph to given file. Must be a full path.
burn_in: Number of burn in steps after pretraining. If zero do not burn in
or reinitialize walkers.
mcmc_steps: Number of MCMC steps to perform between network updates.
Raises:
RuntimeError: if a NaN value for the loss or another accumulated statistic
is detected.
"""
if global_step is None:
with strategy.scope():
global_step = tf.train.get_or_create_global_step()
if profile_iterations is None:
profile_iterations = []
if hasattr(self.network, 'pretrain_iterations'):
stats_len = max(iterations, self.network.pretrain_iterations)
else:
stats_len = iterations
with strategy.scope():
mcmc_step = self.data_gen.step()
mcmc_hf_step = self.hf_data_gen.step()
mcmc_reset_walkers = self.data_gen.reinitialize_walkers()
mcmc_update_psi = self.data_gen.update_psi()
# Pretraining
concat_data = tf.concat([self.hf_data_gen.walkers, self.data_gen.walkers],
axis=1)
pretrain_ops = networks.pretrain_hartree_fock(self.network, concat_data,
strategy, scf_approx)
qmc_step_fn = functools.partial(self._qmc_step_fn, optim_fn, using_kfac,
global_step)
with strategy.scope():
qmc_out_per_replica = strategy.experimental_run(qmc_step_fn)
qmc_out_per_replica = {
key: tf.stack(strategy.experimental_local_results(value))
for key, value in qmc_out_per_replica.items()
}
loss = tf.reduce_mean(qmc_out_per_replica['loss'])
stats_ops = self.data_gen.stats_ops(log_walkers=logging_config.walkers)
stats_ops['loss_per_replica'] = qmc_out_per_replica['loss']
stats_loss_ = np.zeros(shape=qmc_out_per_replica['loss'].shape.as_list())
if logging_config.wavefunction:
stats_ops['log_wavefunction'] = qmc_out_per_replica['features']
stats_ops['wavefunction_sign'] = qmc_out_per_replica['features_sign']
if logging_config.local_energy:
stats_ops['local_energy'] = qmc_out_per_replica['local_energies']
train_schema = ['energy', 'pmove']
if logging_config.replicas > 1:
train_schema += [
'energy_replica:{}'.format(i) for i in range(logging_config.replicas)
]
# As these data are much larger, we create a separated H5 file to avoid
# polluting the CSV.
h5_data = ('walkers', 'log_wavefunction', 'wavefunction_sign',
'local_energy')
h5_schema = {
key: stats_ops[key].shape.as_list()
for key in h5_data
if key in stats_ops
}
if write_graph:
tf.train.write_graph(tf.get_default_graph(), os.path.dirname(write_graph),
os.path.basename(write_graph))
if determinism_mode:
config_proto = tf.ConfigProto(
inter_op_parallelism_threads=1,
device_count={'GPU': 0},
allow_soft_placement=True)
else:
config_proto = tf.ConfigProto(allow_soft_placement=True)
# See https://github.com/tensorflow/tensorflow/issues/23780.
config_proto.graph_options.rewrite_options.memory_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
saver = tf.train.Saver(max_to_keep=10, save_relative_paths=True)
scaffold = tf.train.Scaffold(saver=saver)
checkpoint_path = os.path.join(logging_config.result_path, 'checkpoints')
if logging_config.restore_path:
logging.info('Copying %s to %s and attempting to restore',
logging_config.restore_path, checkpoint_path)
shutil.copytree(logging_config.restore_path, checkpoint_path)
with tf.train.MonitoredTrainingSession(
scaffold=scaffold,
checkpoint_dir=checkpoint_path,
save_checkpoint_secs=logging_config.save_frequency * 60,
save_checkpoint_steps=None,
save_summaries_steps=None,
log_step_count_steps=None,
config=config_proto,
) as sess:
logging.info('Initialized variables')
if self.network.pretrain_iterations > 0:
logging.info('Pretraining network parameters')
with writers.Writer(
name='pretrain_stats',
schema=['loss'],
directory=logging_config.result_path) as pretrain_writer:
for t in range(self.network.pretrain_iterations):
for _ in range(mcmc_steps):
sess.run(mcmc_step)
for _ in range(mcmc_steps):
sess.run(mcmc_hf_step)
pretrain_loss = sess.run(pretrain_ops)
pretrain_writer.write(t, loss=pretrain_loss)
logging.info('Pretrained network parameters.')
if burn_in > 0:
logging.info('Resetting MCMC state')
sess.run(mcmc_reset_walkers)
for _ in range(burn_in):
for _ in range(mcmc_steps):
sess.run(mcmc_step)
logging.info('MCMC burn in complete')
with writers.Writer(name='train_stats', schema=train_schema, directory=logging_config.result_path) as training_writer, \
writers.H5Writer(name='data.h5', schema=h5_schema, directory=logging_config.result_path) as h5_writer:
for t in range(iterations):
if cached_data_op is not None:
sess.run(cached_data_op)
sess.run(mcmc_update_psi)
for _ in range(mcmc_steps):
sess.run(mcmc_step)
loss_, stats_ops_ = sess.run([loss, stats_ops])
stats_values = [loss_] + list(stats_ops_.values())
# Update the contents of stats_loss_ in place rather than rebinding the
# name, so that any earlier capture of the array (e.g. by the checkpoint
# hook) sees the current per-replica loss.
stats_loss_[:] = stats_ops_['loss_per_replica']
if any(np.isnan(stat).any() for stat in stats_values):
logging.info('NaN value detected. Loss: %s. Stats: %s.', loss_,
stats_ops_)
if self._check_loss:
# Re-run the mcmc_reset (setting psi) as the checkpoint hook is
# evaluated at the end of a session run call.
sess.run(mcmc_update_psi)
raise RuntimeError(
'NaN value detected. Loss: {}. Stats: {}.'.format(
loss_, stats_ops_))
if t % logging_config.stats_frequency == 0:
writer_kwargs = {
'energy': loss_,
'pmove': stats_ops_['pmove'],
}
if logging_config.replicas > 1:
for i in range(logging_config.replicas):
lpr = stats_ops_['loss_per_replica'][i]
writer_kwargs['energy_replica:%d' % i] = lpr
training_writer.write(t, **writer_kwargs)
h5_writer.write(t, stats_ops_)
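# Illustrative sketch (not part of the original module): the local-energy
# clipping used in _qmc_step_fn, written out in numpy. `local_energies` is a
# hypothetical 1D array of per-walker local energies and `clip_el` the scale
# from the training configuration; the window is the median plus/minus
# clip_el times the mean absolute deviation from the median (an l1 analogue
# of the standard deviation), which keeps the gradient robust to outliers.
def _clip_local_energy_sketch(local_energies, clip_el=5.0):
  median = np.median(local_energies)
  total_variation = np.mean(np.abs(local_energies - median))
  return np.clip(local_energies,
                 median - clip_el * total_variation,
                 median + clip_el * total_variation)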
| ferminet-master | ferminet/qmc.py |
# Lint as: python3
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learn ground state wavefunctions for molecular systems using VMC."""
import copy
import os
from typing import Any, Mapping, Optional, Sequence, Tuple
from absl import logging
import attr
from ferminet import hamiltonian
from ferminet import mcmc
from ferminet import mean_corrected_kfac_opt
from ferminet import networks
from ferminet import qmc
from ferminet import scf
from ferminet.utils import elements
from ferminet.utils import system
import numpy as np
import tensorflow.compat.v1 as tf
def _validate_directory(obj, attribute, value):
"""Validates value is a directory."""
del obj
if value and not os.path.isdir(value):
raise ValueError(f'{attribute.name} is not a directory')
@attr.s(auto_attribs=True)
class LoggingConfig:
"""Logging information for Fermi Nets.
Attributes:
result_path: directory to use for saving model parameters and calculations
results. Created if does not exist.
save_frequency: frequency (in minutes) at which parameters are saved.
restore_path: directory to use for restoring model parameters.
stats_frequency: frequency (in iterations) at which statistics (via stats
hooks) are updated and stored.
replicas: the number of replicas used during training. Will be set
automatically.
walkers: If true, log walkers at every step.
wavefunction: If true, log wavefunction at every step.
local_energy: If true, log local energy at every step.
config: dictionary of additional information about the calculation setup.
Reported along with calculation statistics.
"""
result_path: str = '.'
save_frequency: float = 10
restore_path: str = attr.ib(default=None, validator=_validate_directory)
stats_frequency: int = 1
replicas: int = 1
walkers: bool = False
wavefunction: bool = False
local_energy: bool = False
config: Mapping[str, Any] = attr.ib(converter=dict,
default=attr.Factory(dict))
@attr.s(auto_attribs=True)
class MCMCConfig:
"""Markov Chain Monte Carlo configuration for Fermi Nets.
Attributes:
burn_in: Number of burn in steps after pretraining.
steps: Number of MCMC steps to make between network updates.
init_width: Width of (atom-centred) Gaussians used to generate initial
electron configurations.
move_width: Width of Gaussian used for random moves.
init_means: Iterable of 3*nelectrons giving the mean initial position of
each electron. Configurations are drawn using Gaussians of width
init_width at each 3D position. Alpha electrons are listed before beta
electrons. If empty, electrons are assigned to atoms based upon the
isolated atom spin configuration. Expert use only.
"""
burn_in: int = 100
steps: int = 10
init_width: float = 0.8
move_width: float = 0.02
init_means: Optional[Sequence[float]] = None
@attr.s(auto_attribs=True)
class PretrainConfig:
"""Hartree-Fock pretraining algorithm configuration for Fermi Nets.
Attributes:
iterations: Number of iterations for which to pretrain the network to match
Hartree-Fock orbitals.
basis: Basis set used to run Hartree-Fock calculation in PySCF.
"""
iterations: int = 1000
basis: str = 'sto-3g'
@attr.s(auto_attribs=True)
class OptimConfig:
"""Optimization configuration for Fermi Nets.
Attributes:
iterations: Number of optimization iterations.
clip_el: If not none, scale at which to clip local energy.
learning_rate: learning rate.
learning_rate_decay: exponent of learning rate decay.
learning_rate_delay: scale of the rate decay.
use_kfac: Use the K-FAC optimizer if true, ADAM optimizer otherwise.
check_loss: Apply gradient update only if the loss is not NaN. If true,
training could be slightly slower but the checkpoint written out when a
NaN is detected will be with the network weights which led to the NaN.
deterministic: CPU only mode that also enforces determinism. Will run
*significantly* slower if used.
"""
iterations: int = 1000000
learning_rate: float = 1.e-4
learning_rate_decay: float = 1.0
learning_rate_delay: float = 10000.0
clip_el: float = 5.0
use_kfac: bool = True
check_loss: bool = False
deterministic: bool = False
@attr.s(auto_attribs=True)
class KfacConfig:
"""K-FAC configuration - see docs at https://github.com/tensorflow/kfac/."""
invert_every: int = 1
cov_update_every: int = 1
damping: float = 0.001
cov_ema_decay: float = 0.95
momentum: float = 0.0
momentum_type: str = attr.ib(
default='regular',
validator=attr.validators.in_(
['regular', 'adam', 'qmodel', 'qmodel_fixedmu']))
adapt_damping: bool = False
damping_adaptation_decay: float = 0.9
damping_adaptation_interval: int = 5
min_damping: float = 1.e-5
norm_constraint: float = 0.001
@attr.s(auto_attribs=True)
class NetworkConfig:
"""Network configuration for Fermi Net.
Attributes:
architecture: The choice of architecture to run the calculation with. Either
"ferminet" or "slater" for the Fermi Net and standard Slater determinant
respectively.
hidden_units: Number of hidden units in each layer of the network. If
the Fermi Net with one- and two-electron streams is used, a tuple is
provided for each layer, with the first element giving the number of
hidden units in the one-electron stream and the second element giving the
number of units in the two-electron stream. Otherwise, each layer is
represented by a single integer.
determinants: Number of determinants to use.
r12_en_features: Include r12/distance features between electrons and nuclei.
Highly recommended.
r12_ee_features: Include r12/distance features between pairs of electrons.
Highly recommended.
pos_ee_features: Include electron-electron position features. Highly
recommended.
use_envelope: Include multiplicative exponentially-decaying envelopes on
each orbital. Calculations will not converge if set to False.
backflow: Include backflow transformation in input coordinates.
Only for use if network_architecture == "slater". Implies build_backflow
is also True.
build_backflow: Create backflow weights but do not include backflow
coordinate transformation in the network. Use to train a Slater-Jastrow
architecture and then train a Slater-Jastrow-Backflow architecture
based on it in a two-stage optimization process.
residual: Use residual connections in network. Recommended.
after_det: Number of hidden units in each layer of the neural network after
the determinants. By default, just takes a weighted sum of
determinants with no nonlinearity.
jastrow_en: Include electron-nuclear Jastrow factor. Only relevant with
Slater-Jastrow-Backflow architectures.
jastrow_ee: Include electron-electron Jastrow factor. Only relevant with
Slater-Jastrow-Backflow architectures.
jastrow_een: Include electron-electron-nuclear Jastrow factor. Only
relevant with Slater-Jastrow-Backflow architectures.
"""
architecture: str = attr.ib(
default='ferminet', validator=attr.validators.in_(['ferminet', 'slater']))
hidden_units: Sequence[Tuple[int, int]] = ((256, 32),) * 4
determinants: int = 16
r12_en_features: bool = True
r12_ee_features: bool = True
pos_ee_features: bool = True
use_envelope: bool = True
backflow: bool = False
build_backflow: bool = False
residual: bool = True
after_det: Sequence[int] = (1,)
jastrow_en: bool = False
jastrow_ee: bool = False
jastrow_een: bool = False
def assign_electrons(molecule, electrons):
"""Assigns electrons to atoms using non-interacting spin configurations.
Args:
molecule: List of Hamiltonian.Atom objects for each atom in the system.
electrons: Pair of ints giving number of alpha (spin-up) and beta
(spin-down) electrons.
Returns:
1D np.ndarray of length 3N containing initial mean positions of each
electron based upon the atom positions, where N is the total number of
electrons. The first 3*electrons[0] positions correspond to the alpha
(spin-up) electrons and the next 3*electrons[1] to the beta (spin-down)
electrons.
Raises:
RuntimeError: if a different number of electrons or different spin
polarisation is generated.
"""
# Assign electrons based upon unperturbed atoms and ignore impact of
# fractional nuclear charge.
nuclei = [int(round(atom.charge)) for atom in molecule]
total_charge = sum(nuclei) - sum(electrons)
# Construct a dummy iso-electronic neutral system.
neutral_molecule = [copy.copy(atom) for atom in molecule]
if total_charge != 0:
logging.warning(
'Charged system. Using heuristics to set initial electron positions')
charge = 1 if total_charge > 0 else -1
while total_charge != 0:
# Poor proxy for electronegativity.
atom_index = nuclei.index(max(nuclei) if total_charge < 0 else min(nuclei))
atom = neutral_molecule[atom_index]
atom.charge -= charge
atom.atomic_number = int(round(atom.charge))
if int(round(atom.charge)) == 0:
neutral_molecule.pop(atom_index)
else:
atom.symbol = elements.ATOMIC_NUMS[atom.atomic_number].symbol
total_charge -= charge
nuclei = [int(round(atom.charge)) for atom in neutral_molecule]
spin_pol = lambda electrons: electrons[0] - electrons[1]
abs_spin_pol = abs(spin_pol(electrons))
if len(neutral_molecule) == 1:
elecs_atom = [electrons]
else:
elecs_atom = []
spin_pol_assigned = 0
for ion in neutral_molecule:
# Greedily assign up and down electrons based upon the ground state spin
# configuration of an isolated atom.
atom_spin_pol = elements.ATOMIC_NUMS[ion.atomic_number].spin_config
nelec = ion.atomic_number
na = (nelec + atom_spin_pol) // 2
nb = nelec - na
# Attempt to keep spin polarisation as close to 0 as possible.
if (spin_pol_assigned > 0 and
spin_pol_assigned + atom_spin_pol > abs_spin_pol):
elec_atom = [nb, na]
else:
elec_atom = [na, nb]
spin_pol_assigned += spin_pol(elec_atom)
elecs_atom.append(elec_atom)
electrons_assigned = [sum(e) for e in zip(*elecs_atom)]
spin_pol_assigned = spin_pol(electrons_assigned)
if np.sign(spin_pol_assigned) == -np.sign(abs_spin_pol):
# Started with the wrong guess for spin-up vs spin-down.
elecs_atom = [e[::-1] for e in elecs_atom]
spin_pol_assigned = -spin_pol_assigned
if spin_pol_assigned != abs_spin_pol:
logging.info('Spin polarisation does not match isolated atoms. '
'Using heuristics to set initial electron positions.')
while spin_pol_assigned != abs_spin_pol:
atom_spin_pols = [abs(spin_pol(e)) for e in elecs_atom]
atom_index = atom_spin_pols.index(max(atom_spin_pols))
elec_atom = elecs_atom[atom_index]
if spin_pol_assigned < abs_spin_pol and elec_atom[0] <= elec_atom[1]:
elec_atom[0] += 1
elec_atom[1] -= 1
spin_pol_assigned += 2
elif spin_pol_assigned < abs_spin_pol and elec_atom[0] > elec_atom[1]:
elec_atom[0] -= 1
elec_atom[1] += 1
spin_pol_assigned += 2
elif spin_pol_assigned > abs_spin_pol and elec_atom[0] > elec_atom[1]:
elec_atom[0] -= 1
elec_atom[1] += 1
spin_pol_assigned -= 2
else:
elec_atom[0] += 1
elec_atom[1] -= 1
spin_pol_assigned -= 2
electrons_assigned = [sum(e) for e in zip(*elecs_atom)]
if spin_pol(electrons_assigned) == -spin_pol(electrons):
elecs_atom = [e[::-1] for e in elecs_atom]
electrons_assigned = electrons_assigned[::-1]
logging.info(
'Electrons assigned %s.', ', '.join([
'{}: {}'.format(atom.symbol, elec_atom)
for atom, elec_atom in zip(molecule, elecs_atom)
]))
if any(e != e_assign for e, e_assign in zip(electrons, electrons_assigned)):
raise RuntimeError(
'Assigned incorrect number of electrons (%s instead of %s)' %
(electrons_assigned, electrons))
if any(min(ne) < 0 for ne in zip(*elecs_atom)):
raise RuntimeError('Assigned negative number of electrons!')
electron_positions = np.concatenate([
np.tile(atom.coords, e[0])
for atom, e in zip(neutral_molecule, elecs_atom)
] + [
np.tile(atom.coords, e[1])
for atom, e in zip(neutral_molecule, elecs_atom)
])
return electron_positions
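# Illustrative sketch (not part of the original module): assigning initial
# electron positions for a LiH molecule with 2 alpha and 2 beta electrons.
# The bond length below is an arbitrary placeholder (positions in bohr).
def _assign_electrons_sketch():
  molecule = [
      system.Atom(symbol='Li', coords=(0.0, 0.0, 0.0)),
      system.Atom(symbol='H', coords=(0.0, 0.0, 3.0)),
  ]
  # Returns a length-12 array: 3 coordinates for each of the 4 electrons,
  # alpha electrons first, each electron centred on the atom it was
  # assigned to by the greedy isolated-atom spin heuristic above.
  return assign_electrons(molecule, electrons=(2, 2))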
def train(molecule: Sequence[system.Atom],
spins: Tuple[int, int],
batch_size: int,
network_config: Optional[NetworkConfig] = None,
pretrain_config: Optional[PretrainConfig] = None,
optim_config: Optional[OptimConfig] = None,
kfac_config: Optional[KfacConfig] = None,
mcmc_config: Optional[MCMCConfig] = None,
logging_config: Optional[LoggingConfig] = None,
multi_gpu: bool = False,
double_precision: bool = False,
graph_path: Optional[str] = None):
"""Configures and runs training loop.
Args:
molecule: molecule description.
spins: pair of ints specifying number of spin-up and spin-down electrons
respectively.
batch_size: batch size. Also referred to as the number of Markov Chain Monte
Carlo configurations/walkers.
network_config: network configuration. Default settings in NetworkConfig are
used if not specified.
pretrain_config: pretraining configuration. Default settings in
PretrainConfig are used if not specified.
optim_config: optimization configuration. Default settings in OptimConfig
are used if not specified.
kfac_config: K-FAC configuration. Default settings in KfacConfig are used if
not specified.
mcmc_config: Markov Chain Monte Carlo configuration. Default settings in
MCMCConfig are used if not specified.
logging_config: logging and checkpoint configuration. Default settings in
LoggingConfig are used if not specified.
multi_gpu: Use all available GPUs. Default: use only a single GPU.
double_precision: use tf.float64 instead of tf.float32 for all operations.
Warning - double precision is not currently functional with K-FAC.
graph_path: directory to save a representation of the TF graph to. Not
saved if None.
Raises:
RuntimeError: if mcmc_config.init_means is supplied but is of the incorrect
length.
"""
if not mcmc_config:
mcmc_config = MCMCConfig()
if not logging_config:
logging_config = LoggingConfig()
if not pretrain_config:
pretrain_config = PretrainConfig()
if not optim_config:
optim_config = OptimConfig()
if not kfac_config:
kfac_config = KfacConfig()
if not network_config:
network_config = NetworkConfig()
nelectrons = sum(spins)
precision = tf.float64 if double_precision else tf.float32
if multi_gpu:
strategy = tf.distribute.MirroredStrategy()
else:
# Get the default (single-device) strategy.
strategy = tf.distribute.get_strategy()
if multi_gpu:
batch_size = batch_size // strategy.num_replicas_in_sync
logging.info('Setting per-GPU batch size to %s.', batch_size)
logging_config.replicas = strategy.num_replicas_in_sync
logging.info('Running on %s replicas.', strategy.num_replicas_in_sync)
# Create a re-entrant variable scope for network.
with tf.variable_scope('model') as model:
pass
with strategy.scope():
with tf.variable_scope(model, auxiliary_name_scope=False) as model1:
with tf.name_scope(model1.original_name_scope):
fermi_net = networks.FermiNet(
atoms=molecule,
nelectrons=spins,
slater_dets=network_config.determinants,
hidden_units=network_config.hidden_units,
after_det=network_config.after_det,
architecture=network_config.architecture,
r12_ee_features=network_config.r12_ee_features,
r12_en_features=network_config.r12_en_features,
pos_ee_features=network_config.pos_ee_features,
build_backflow=network_config.build_backflow,
use_backflow=network_config.backflow,
jastrow_en=network_config.jastrow_en,
jastrow_ee=network_config.jastrow_ee,
jastrow_een=network_config.jastrow_een,
logdet=True,
envelope=network_config.use_envelope,
residual=network_config.residual,
pretrain_iterations=pretrain_config.iterations)
scf_approx = scf.Scf(
molecule,
nelectrons=spins,
restricted=False,
basis=pretrain_config.basis)
if pretrain_config.iterations > 0:
scf_approx.run()
hamiltonian_ops = hamiltonian.operators(molecule, nelectrons)
if mcmc_config.init_means:
if len(mcmc_config.init_means) != 3 * nelectrons:
raise RuntimeError('Initial electron positions of incorrect shape. '
'({} not {})'.format(
len(mcmc_config.init_means), 3 * nelectrons))
init_means = [float(x) for x in mcmc_config.init_means]
else:
init_means = assign_electrons(molecule, spins)
# Build the MCMC state inside the same variable scope as the network.
with tf.variable_scope(model, auxiliary_name_scope=False) as model1:
with tf.name_scope(model1.original_name_scope):
data_gen = mcmc.MCMC(
fermi_net,
batch_size,
init_mu=init_means,
init_sigma=mcmc_config.init_width,
move_sigma=mcmc_config.move_width,
dtype=precision)
with tf.variable_scope('HF_data_gen'):
hf_data_gen = mcmc.MCMC(
scf_approx.tf_eval_slog_hartree_product,
batch_size,
init_mu=init_means,
init_sigma=mcmc_config.init_width,
move_sigma=mcmc_config.move_width,
dtype=precision)
with tf.name_scope('learning_rate_schedule'):
global_step = tf.train.get_or_create_global_step()
lr = optim_config.learning_rate * tf.pow(
(1.0 / (1.0 + (tf.cast(global_step, tf.float32) /
optim_config.learning_rate_delay))),
optim_config.learning_rate_decay)
if optim_config.learning_rate < 1.e-10:
logging.warning('Learning rate less than 10^-10. Not using an optimiser.')
optim_fn = lambda _: None
update_cached_data = None
elif optim_config.use_kfac:
cached_data = tf.get_variable(
'MCMC_cache',
initializer=tf.zeros(shape=data_gen.walkers.shape, dtype=precision),
use_resource=True,
trainable=False,
dtype=precision,
)
if kfac_config.adapt_damping:
update_cached_data = tf.assign(cached_data, data_gen.walkers)
else:
update_cached_data = None
optim_fn = lambda layer_collection: mean_corrected_kfac_opt.MeanCorrectedKfacOpt( # pylint: disable=g-long-lambda
invert_every=kfac_config.invert_every,
cov_update_every=kfac_config.cov_update_every,
learning_rate=lr,
norm_constraint=kfac_config.norm_constraint,
damping=kfac_config.damping,
cov_ema_decay=kfac_config.cov_ema_decay,
momentum=kfac_config.momentum,
momentum_type=kfac_config.momentum_type,
loss_fn=lambda x: tf.nn.l2_loss(fermi_net(x)[0]),
train_batch=data_gen.walkers,
prev_train_batch=cached_data,
layer_collection=layer_collection,
batch_size=batch_size,
adapt_damping=kfac_config.adapt_damping,
is_chief=True,
damping_adaptation_decay=kfac_config.damping_adaptation_decay,
damping_adaptation_interval=kfac_config.damping_adaptation_interval,
min_damping=kfac_config.min_damping,
use_passed_loss=False,
estimation_mode='exact',
)
else:
adam = tf.train.AdamOptimizer(lr)
optim_fn = lambda _: adam
update_cached_data = None
qmc_net = qmc.QMC(
hamiltonian_ops,
fermi_net,
data_gen,
hf_data_gen,
clip_el=optim_config.clip_el,
check_loss=optim_config.check_loss,
)
qmc_net.train(
optim_fn,
optim_config.iterations,
logging_config,
using_kfac=optim_config.use_kfac,
strategy=strategy,
scf_approx=scf_approx,
global_step=global_step,
determinism_mode=optim_config.deterministic,
cached_data_op=update_cached_data,
write_graph=os.path.abspath(graph_path) if graph_path else None,
burn_in=mcmc_config.burn_in,
mcmc_steps=mcmc_config.steps,
)
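# Illustrative sketch (not part of the original module): a minimal invocation
# of train() for a helium atom. The settings below are placeholders; the
# batch size and iteration counts are far smaller than production values and
# the result path is an arbitrary scratch directory.
def _train_sketch():
  molecule = [system.Atom(symbol='He', coords=(0.0, 0.0, 0.0))]
  train(
      molecule,
      spins=(1, 1),
      batch_size=256,
      pretrain_config=PretrainConfig(iterations=10),
      optim_config=OptimConfig(iterations=100),
      mcmc_config=MCMCConfig(burn_in=10),
      logging_config=LoggingConfig(result_path='/tmp/ferminet_sketch'),
  )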
| ferminet-master | ferminet/train.py |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interaction with Hartree-Fock solver in pyscf."""
# Abbreviations used:
# SCF: self-consistent field (method). Another name for Hartree-Fock
# HF: Hartree-Fock method.
# RHF: restricted Hartree-Fock. Requires the molecular orbital for the i-th alpha-spin
# and i-th beta-spin electrons to have the same spatial component.
# ROHF: restricted open-shell Hartree-Fock. Same as RHF except allows the number
# of alpha and beta electrons to differ.
# UHF: unrestricted Hartree-Fock. Permits breaking of spin symmetry and hence
# alpha and beta electrons to have different spatial components.
# AO: Atomic orbital. Underlying basis set (typically Gaussian-type orbitals and
# built into pyscf).
# MO: molecular orbitals/Hartree-Fock orbitals. Single-particle orbitals which
# are solutions to the Hartree-Fock equations.
from typing import Callable, Tuple
from ferminet.utils import scf as base_scf
import tensorflow.compat.v1 as tf
class Scf(base_scf.Scf):
"""Helper class for running Hartree-Fock (self-consistent field) with pyscf.
This extends ferminet.utils.scf.Scf with tensorflow ops.
Attributes:
molecule: list of system.Atom objects giving the atoms in the molecule
and their positions.
nelectrons: Tuple with number of alpha electrons and beta electrons.
basis: Basis set to use, best specified with the relevant string for a
built-in basis set in pyscf. A user-defined basis set can be used
(advanced). See https://sunqm.github.io/pyscf/gto.html#input-basis for
more details.
restricted: If true, use the restricted Hartree-Fock method, otherwise use
the unrestricted Hartree-Fock method.
"""
def tf_eval_mos(self, positions: tf.Tensor, deriv: bool = False,
) -> Tuple[tf.Tensor, Callable[[tf.Tensor], tf.Tensor]]:
"""Evaluates the Hartree-Fock single-particle orbitals as a tensorflow op.
See: `eval_mos` for evaluating the orbitals given positions as numpy arrays.
Args:
positions: Tensor of shape (N, 3) containing N 3D position vectors at
which the Hartree-Fock orbitals are evaluated.
deriv: If True, also evaluates the first derivatives of the orbitals with
respect to positions and makes them available via tf.gradients.
Returns:
Tensor of shape (N, 2M) and the same dtype as positions. The first
(second) M elements along the last axis gives the value of the alpha-spin
(beta-spin) Hartree-Fock orbitals at the desired positions.
Raises:
RuntimeError: If the first derivatives are requested but deriv is False.
"""
# Evaluate MOs in a nested function to avoid issues with custom_gradients
# and self or with the deriv flag argument.
@tf.custom_gradient
def _eval_mos(positions: tf.Tensor
) -> Tuple[tf.Tensor, Callable[[tf.Tensor], tf.Tensor]]:
"""Computes Hartree-Fock orbitals at given positions."""
mo_values_derivs = tf.py_func(self.eval_mos, [positions, deriv],
(tf.float64, tf.float64))
mo_values_derivs = [
tf.cast(vals, positions.dtype) for vals in mo_values_derivs
]
if deriv:
mo_values = tf.concat([vals[0] for vals in mo_values_derivs], axis=-1)
mo_derivs = tf.concat([vals[1:] for vals in mo_values_derivs], axis=-1)
else:
mo_values = tf.concat(mo_values_derivs, axis=-1)
mo_derivs = None
def grad(dmo: tf.Tensor) -> tf.Tensor:
"""Computes gradients of orbitals with respect to positions."""
# dmo, reverse sensitivity, is a (N, 2M) tensor.
# mo_derivs is the full Jacobian, (3, N, 2M), where mo_derivs[i,j,k] is
# d\phi_{k} / dr^j_i (derivative of the k-th orbital with respect to the
# i-th component of the j-th position vector).
# positions is a (N, 3) tensor => tf.gradients(mo, positions) is (N, 3).
# tensorflow gradients use summation over free indices (i.e. orbitals).
if mo_derivs is None:
raise RuntimeError(
'Gradients not computed in forward pass. Set deriv=True.')
g = tf.reduce_sum(mo_derivs * tf.expand_dims(dmo, 0), axis=-1)
return tf.linalg.transpose(g)
return mo_values, grad
return _eval_mos(positions)
def tf_eval_hf(self, positions: tf.Tensor,
deriv: bool = False) -> Tuple[tf.Tensor, tf.Tensor]:
"""Evaluates Hartree-Fock occupied orbitals at a set of electron positions.
Note: pyscf evaluates all orbitals at each position, which is somewhat
wasteful for this use-case, where we only need occupied orbitals and further
factorise the wavefunction into alpha and beta parts.
Args:
positions: (N, nelectrons, 3) tensor. Batch of positions for each
electron. The first self.nelectrons[0] elements in the second axis refer
to alpha electrons, the next self.nelectrons[1] positions refer to the
beta electrons.
deriv: If True, also evaluates the first derivatives of the orbitals with
respect to positions and makes them available via tf.gradients.
Returns:
Tuple of tensors for alpha and beta electrons respectively of shape
(N, nspin, nspin), where nspin is the corresponding number of electrons of
that spin. The (i, j, k) element gives the value of the k-th orbital for
the j-th electron in batch i. The product of the determinants of the
tensors hence gives the Hartree-Fock determinant at the set of input
positions.
Raises:
RuntimeError: If the sum of alpha and beta electrons does not equal
nelectrons.
"""
if sum(self.nelectrons) != positions.shape.as_list()[1]:
raise RuntimeError(
'positions does not contain correct number of electrons.')
# Evaluate MOs at all electron positions.
# pyscf evaluates all 2M MOs at each 3D position vector. Hence reshape into
# (N*nelectron, 3) tensor for pyscf input.
one_e_positions = tf.reshape(positions, (-1, positions.shape[-1]))
mos = self.tf_eval_mos(one_e_positions, deriv)
nbasis = tf.shape(mos)[-1] // 2
# Undo reshaping of electron positions to give (N, nelectrons, 2M), where
# the (i, j, k) element gives the value of the k-th MO at the j-th electron
# position in the i-th batch.
mos = tf.reshape(mos, (-1, sum(self.nelectrons), 2*nbasis))
# Return (using Aufbau principle) the matrices for the occupied alpha and
# beta orbitals. Number of alpha electrons given by electrons[0].
alpha_spin = mos[:, :self.nelectrons[0], :self.nelectrons[0]]
beta_spin = mos[:, self.nelectrons[0]:, nbasis:nbasis+self.nelectrons[1]]
return alpha_spin, beta_spin
def tf_eval_slog_slater_determinant(self, flat_positions: tf.Tensor
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Evaluates the signed log HF slater determinant at given flat_positions.
Interface is chosen to be similar to that used for QMC networks.
Args:
flat_positions: (N, total_electrons * 3) tensor. Batch of flattened
positions for each electron, reshapeable to (N, total_electrons, 3)
where the first self.nelectrons[0] elements in the second axis refer to
alpha electrons, the next self.nelectrons[1] positions refer to the beta
electrons.
Returns:
log_abs_slater_determinant: (N, 1) tensor. Log of the absolute value of the
Slater determinant.
sign: (N, 1) tensor. Sign of the wavefunction.
Raises:
RuntimeError: on incorrect shape of flat_positions.
"""
xs = tf.reshape(flat_positions,
[flat_positions.shape[0],
sum(self.nelectrons), -1])
if xs.shape[2] != 3:
msg = 'flat_positions must be of shape (N, total_electrons*3)'
raise RuntimeError(msg)
matrices = self.tf_eval_hf(xs)
slogdets = [tf.linalg.slogdet(elem) for elem in matrices]
sign_alpha, sign_beta = [elem[0] for elem in slogdets]
log_abs_wf_alpha, log_abs_wf_beta = [elem[1] for elem in slogdets]
log_abs_slater_determinant = tf.expand_dims(tf.math.add(log_abs_wf_alpha,
log_abs_wf_beta), 1)
sign = tf.expand_dims(tf.math.multiply(sign_alpha, sign_beta), 1)
return log_abs_slater_determinant, sign
def tf_eval_slog_hartree_product(self, flat_positions: tf.Tensor,
deriv: bool = False
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Evaluates the signed log Hartree product without anti-symmetrization.
Interface is chosen to be similar to that used for QMC networks.
Args:
flat_positions: (N, total_electrons * 3) tensor. Batch of flattened
positions for each electron, reshapeable to (N, total_electrons, 3)
where the first self.nelectrons[0] elements in the second axis refer to
alpha electrons, the next self.nelectrons[1] positions refer to the beta
electrons.
deriv: specifies if we will need derivatives.
Returns:
log_abs_hartree_product: (N, 1) tensor. log of abs of Hartree product.
sign: (N, 1) tensor. sign of Hartree product.
"""
xs = tf.reshape(flat_positions,
[int(flat_positions.shape[0]),
sum(self.nelectrons), -1])
matrices = self.tf_eval_hf(xs, deriv)
log_abs_alpha, log_abs_beta = [
tf.linalg.trace(tf.log(tf.abs(elem))) for elem in matrices
]
sign_alpha, sign_beta = [
tf.reduce_prod(tf.sign(tf.linalg.diag_part(elem)), axis=1)
for elem in matrices
]
log_abs_hartree_product = tf.expand_dims(
tf.math.add(log_abs_alpha, log_abs_beta), axis=1)
sign = tf.expand_dims(tf.math.multiply(sign_alpha, sign_beta), axis=1)
return log_abs_hartree_product, sign
def tf_eval_hartree_product(self, flat_positions: tf.Tensor,
deriv: bool = False) -> tf.Tensor:
"""Evaluates the signed log Hartree product without anti-symmetrization.
Interface is chosen to be similar to that used for QMC networks. This is
a convenience wrapper around tf_eval_slog_hartree_product and is hence not
optimised.
Args:
flat_positions: (N, total_electrons * 3) tensor. Batch of flattened
positions for each electron, reshapeable to (N, total_electrons, 3)
where the first self.nelectrons[0] elements in the second axis refer to
alpha electrons, the next self.nelectrons[1] positions refer to the beta
electrons.
deriv: specifies if we will need derivatives.
Returns:
(N,1) tensor containing the Hartree product.
"""
log_abs, sign = self.tf_eval_slog_hartree_product(flat_positions, deriv)
return tf.exp(log_abs) * sign
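# Illustrative sketch (not part of the original module): combining per-spin
# determinants in the log domain, as tf_eval_slog_slater_determinant does
# above. `alpha` and `beta` are hypothetical (N, na, na) and (N, nb, nb)
# batches of occupied-orbital matrices; working with log|det| and the sign
# separately avoids overflow/underflow in the product of determinants. numpy
# is not imported by this module, so the import is kept local to the sketch.
def _slog_slater_sketch(alpha, beta):
  import numpy as np
  sign_a, logdet_a = np.linalg.slogdet(alpha)
  sign_b, logdet_b = np.linalg.slogdet(beta)
  # (N, 1) log|det| and sign of the full Slater-product wavefunction.
  return (logdet_a + logdet_b)[:, None], (sign_a * sign_b)[:, None]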
| ferminet-master | ferminet/scf.py |
# Lint as: python3
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for constructing Hamiltonians and setting up VMC calculations."""
import numpy as np
import tensorflow.compat.v1 as tf
def kinetic_from_log(f, x):
r"""Compute -1/2 \nabla^2 \psi / \psi from log|psi|."""
with tf.name_scope('kinetic_from_log'):
df = tf.gradients(f, x)[0]
lapl_elem = tf.map_fn(
lambda i: tf.gradients(tf.expand_dims(df[..., i], -1), x)[0][..., i],
tf.range(x.shape[1]),
dtype=x.dtype.base_dtype,
back_prop=False,
parallel_iterations=20)
lapl = (tf.reduce_sum(lapl_elem, axis=0) +
tf.reduce_sum(df**2, axis=-1))
return -0.5*tf.expand_dims(lapl, -1)
def operators(atoms,
nelectrons,
potential_epsilon=0.0):
"""Creates kinetic and potential operators of Hamiltonian in atomic units.
Args:
atoms: list of Atom objects for each atom in the system.
nelectrons: number of electrons
potential_epsilon: Epsilon used to smooth the divergence of the 1/r
potential near the origin for algorithms with numerical stability issues.
Returns:
The functions that generate the kinetic and potential energy as TF ops.
"""
vnn = 0.0
for i, atom_i in enumerate(atoms):
for atom_j in atoms[i+1:]:
qij = float(atom_i.charge * atom_j.charge)
vnn += qij / np.linalg.norm(atom_i.coords_array - atom_j.coords_array)
def smooth_norm(x):
with tf.name_scope('smooth_norm'):
if potential_epsilon == 0.0:
return tf.norm(x, axis=1, keepdims=True)
else:
return tf.sqrt(tf.reduce_sum(x**2 + potential_epsilon**2,
axis=1,
keepdims=True))
def nuclear_potential(xs):
"""Calculates nuclear potential for set of electron positions."""
with tf.name_scope('Vne'):
v = []
for atom in atoms:
charge = tf.constant(atom.charge, dtype=xs[0].dtype)
coords = tf.constant(atom.coords, dtype=xs[0].dtype)
v.extend([-charge / smooth_norm(coords - x) for x in xs])
return tf.add_n(v)
def electronic_potential(xs):
"""Calculates electronic potential for set of electron positions."""
with tf.name_scope('Vee'):
if len(xs) > 1:
v = []
for (i, ri) in enumerate(xs):
v.extend([
1.0 / smooth_norm(ri - rj) for rj in xs[i + 1:]
])
return tf.add_n(v)
else:
return 0.0
def nuclear_nuclear(dtype):
"""Calculates the nuclear-nuclear interaction contribution."""
with tf.name_scope('Vnn'):
return tf.constant(vnn, dtype=dtype)
def potential(x):
"""Calculates the total potential energy at each electron position."""
xs = tf.split(x, nelectrons, axis=1)
return (nuclear_potential(xs) + electronic_potential(xs)
+ nuclear_nuclear(xs[0].dtype))
return kinetic_from_log, potential
def exact_hamiltonian(atoms,
nelectrons,
potential_epsilon=0.0):
"""Construct function that evaluates exact Hamiltonian of a system.
Args:
atoms: list of Atom objects for each atom in the system.
nelectrons: number of electrons
potential_epsilon: Epsilon used to smooth the divergence of the 1/r
potential near the origin for algorithms with numerical stability issues.
Returns:
A function that generates the wavefunction and Hamiltonian ops.
"""
k_fn, v_fn = operators(atoms, nelectrons, potential_epsilon)
def _hamiltonian(f, x):
# kinetic_from_log actually computes -1/2 \nabla^2 f / f, so multiply by f
logpsi, signpsi = f(x)
psi = tf.exp(logpsi) * signpsi
hpsi = psi * (k_fn(logpsi, x) + v_fn(x))
return psi, hpsi
return _hamiltonian
def r12_features(x, atoms, nelectrons, keep_pos=True, flatten=False,
atomic_coords=False):
"""Adds physically-motivated features depending upon electron distances.
The tensor of electron positions is extended to include the distance of each
electron to each nucleus and distance between each pair of electrons.
Args:
x: electron positions. Tensor either of shape (batch_size, nelectrons*ndim),
or (batch_size, nelectrons, ndim), where ndim is the dimensionality of the
system (usually 3).
atoms: list of Atom objects for each atom in the system.
nelectrons: number of electrons.
keep_pos: If true, includes the original electron positions in the output
flatten: If true, return the distances as a flat vector for each element of
the batch. If false, return the atom-electron distances and electron-
electron distances each as 3D arrays.
atomic_coords: If true, replace the original position of the electrons with
the position of the electrons relative to all atoms.
Returns:
If flatten is true, keep_pos is true and atomic_coords is false:
tensor of shape (batch_size, ndim*Ne + Ne*Na + Ne(Ne-1)/2), where Ne (Na)
is the number of electrons (atoms). The first ndim*Ne terms are the
original x, the next Ne terms are |x_i - R_1|, where R_1 is the position
of the first nucleus (and so on for each atom), and the remaining terms
are |x_i - x_j| for each (i,j) pair, where i and j run over all electrons,
with i varied slowest.
If flatten is true and keep_pos is false: it does not include the first
ndim*Ne features.
If flatten is false and keep_pos is false: tensors of shape
(batch_size, Ne, Na) and (batch_size, Ne, Ne)
If flatten is false and keep_pos is true: same as above, and also a tensor
of size (batch_size, Ne, ndim)
If atomic_coords is true: the same as if keep_pos is true, except the
ndim*Ne coordinates corresponding to the original positions are replaced
by ndim*Ne*Na coordinates corresponding to the difference of each
electron position from each atomic position.
"""
with tf.name_scope('r12_features'):
if len(x.shape) == 2:
xs = tf.reshape(x, [x.shape[0], nelectrons, -1])
else:
xs = x
coords = tf.stack([
tf.constant(atom.coords, dtype=x.dtype.base_dtype) for atom in atoms
])
coords = tf.expand_dims(tf.expand_dims(coords, 0), 0)
xsa = tf.expand_dims(xs, 2) - coords # xs in atomic coordinates
r_ae = tf.norm(xsa, axis=-1)
r_ee = np.zeros((nelectrons, nelectrons), dtype=object)
for i in range(nelectrons):
for j in range(i+1, nelectrons):
r_ee[i, j] = tf.norm(xs[:, i, :] - xs[:, j, :], axis=1, keepdims=True)
if flatten:
r_ae = tf.reshape(r_ae, [r_ae.shape[0], -1])
if nelectrons > 1:
r_ee = tf.concat(
r_ee[np.triu_indices(nelectrons, k=1)].tolist(), axis=1)
else:
r_ee = tf.zeros([r_ae.shape[0], 0])
if keep_pos:
if atomic_coords:
xsa = tf.reshape(xsa, [xsa.shape[0], -1])
return tf.concat([r_ae, r_ee, xsa], axis=1)
else:
return tf.concat([r_ae, r_ee, x], axis=1)
else:
return tf.concat([r_ae, r_ee], axis=1)
else:
zeros_like = tf.zeros((xs.shape[0], 1), dtype=x.dtype.base_dtype)
for i in range(nelectrons):
r_ee[i, i] = zeros_like
for j in range(i):
r_ee[i, j] = r_ee[j, i]
r_ee = tf.transpose(tf.stack(r_ee.tolist()), [2, 0, 1, 3])
if keep_pos:
if atomic_coords:
return r_ae, r_ee, xsa
else:
return r_ae, r_ee, xs
else:
return r_ae, r_ee
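# Illustrative sketch (not part of the original module): kinetic_from_log
# relies on the identity nabla^2 psi / psi = nabla^2 log|psi|
# + |nabla log|psi||^2. For the hydrogen 1s state, log|psi| = -r, so the
# kinetic energy is -1/2 * (1 - 2/r) = -1/2 + 1/r, which cancels the -1/r
# nuclear potential to give the exact local energy of -1/2 hartree at every
# point. The hypothetical `xs` below is an (N, 3) array of positions away
# from the origin.
def _hydrogen_local_energy_sketch(xs):
  r = np.linalg.norm(xs, axis=1)
  kinetic = -0.5 * (1.0 - 2.0 / r)   # -1/2 (lapl log|psi| + |grad log|psi||^2)
  potential = -1.0 / r
  return kinetic + potential          # equals -0.5 everywhere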
| ferminet-master | ferminet/hamiltonian.py |
# Lint as: python3
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ferminet.hamiltonian."""
from ferminet import hamiltonian
from ferminet.utils import system
import numpy as np
import tensorflow.compat.v1 as tf
def _hydrogen(xs):
"""Hydrogen (3D) atom ground state (1s) wavefunction.
Energy: -1/2 hartrees.
Args:
xs: tensor (batch_size, 3) of electron positions.
Returns:
tensor (batch_size, 1) of the (unnormalised) wavefunction at each position.
"""
with tf.name_scope('Psi_H'):
return tf.exp(-tf.norm(xs, axis=1, keepdims=True))
def _helium(xs):
"""Compact parametrized Helium wavefunction.
See https://opencommons.uconn.edu/chem_educ/30/ and Phys Rev A 74, 014501
(2006).
Energy: -2.901188 hartrees (compared to -2.9037243770 hartrees for the exact
ground state).
Args:
xs: tensor (batch_size, 6) of electron positions.
Returns:
tensor (batch_size, 1) of the (unnormalised) wavefunction at each pair of
positions.
"""
with tf.name_scope('Psi_He'):
x1, x2 = tf.split(xs, 2, axis=1)
x1n = tf.norm(x1, axis=1, keepdims=True)
x2n = tf.norm(x2, axis=1, keepdims=True)
s = x1n + x2n
t = x1n - x2n
u = tf.norm(x1 - x2, axis=1, keepdims=True)
return (tf.exp(-2*s)
* (1 + 0.5*u*tf.exp(-1.013*u))
* (1 + 0.2119*s*u + 0.1406*t*t - 0.003*u*u))
def _grid_points(start, stop, npoints, ndim):
x1 = np.linspace(start, stop, npoints)
x2 = np.zeros(npoints)
g1 = np.array([x1] + [x2 for _ in range(ndim-1)]).T
g2 = np.array([x1, x1] + [x2 for _ in range(ndim-2)]).T
return np.ascontiguousarray(g1), np.ascontiguousarray(g2)
def _wfn_to_log(f):
def _logf(x):
psi = f(x)
return tf.log(tf.abs(psi)), tf.sign(psi)
return _logf
class WavefunctionTest(tf.test.TestCase):
"""Basic tests for the toy wavefunctions defined above."""
def test_hydrogen(self):
grid1, grid2 = _grid_points(-1.1, 1.1, 21, 3)
psi_np = np.exp(-np.abs(grid1[:, 0]))
psi = _hydrogen(tf.constant(grid1))
with tf.train.MonitoredSession() as session:
psi_ = np.squeeze(session.run(psi))
np.testing.assert_allclose(psi_, psi_np)
np.testing.assert_allclose(psi_, psi_[::-1])
psi = _hydrogen(tf.constant(grid2))
with tf.train.MonitoredSession() as session:
psi_ = np.squeeze(session.run(psi))
np.testing.assert_allclose(psi_, psi_np**np.sqrt(2))
np.testing.assert_allclose(psi_, psi_[::-1])
def test_helium(self):
grid1, grid2 = _grid_points(-1.1, 1.0, 5, 6)
grid1[:, 3] += 0.5 # Avoid nuclear singularity for second electron
grid2[:, 3] += 0.5 # Avoid nuclear singularity for second electron
# Exact results calculated by hand...
psi1 = np.array([0.07484748391,
0.17087261364,
0.42062736263,
0.14476421949,
0.06836260905])
psi2 = np.array([0.037059842334,
0.114854125198,
0.403704643707,
0.123475237250,
0.040216273342])
psi = _helium(tf.constant(grid1))
with tf.train.MonitoredSession() as session:
psi_ = np.squeeze(session.run(psi))
np.testing.assert_allclose(psi_, psi1)
# Check for symmetry: swap x and z and electron 1 and electron 2.
grid1 = np.flip(grid1, axis=1)
psi = _helium(tf.constant(grid1))
with tf.train.MonitoredSession() as session:
psiz_ = np.squeeze(session.run(psi))
np.testing.assert_allclose(psi_, psiz_)
psi = _helium(tf.constant(grid2))
with tf.train.MonitoredSession() as session:
psi_ = np.squeeze(session.run(psi))
np.testing.assert_allclose(psi_, psi2)
class HamiltonianAtomTest(tf.test.TestCase):
def test_hydrogen_atom(self):
xs = tf.random_uniform((6, 3), minval=-1.0, maxval=1.0)
atoms = [system.Atom(symbol='H', coords=(0, 0, 0))]
kin, pot = hamiltonian.operators(atoms, 1, 0.0)
logpsi = tf.log(tf.abs(_hydrogen(xs)))
e_loc = kin(logpsi, xs) + pot(xs)
with tf.train.MonitoredSession() as session:
e_loc_ = session.run(e_loc)
# Wavefunction is the ground state eigenfunction. Hence H\Psi = -1/2 \Psi.
np.testing.assert_allclose(e_loc_, -0.5, rtol=1.e-6, atol=1.e-7)
def test_helium_atom(self):
x1 = np.linspace(-1, 1, 6, dtype=np.float32)
x2 = np.zeros(6, dtype=np.float32) + 0.25
x12 = np.array([x1] + [x2 for _ in range(5)]).T
atoms = [system.Atom(symbol='He', coords=(0, 0, 0))]
hpsi_test = np.array([
-0.25170374, -0.43359983, -0.61270618, -1.56245542, -0.39977553,
-0.2300467
])
log_helium = _wfn_to_log(_helium)
h = hamiltonian.exact_hamiltonian(atoms, 2)
xs = tf.constant(x12)
_, hpsi = h(log_helium, xs)
with tf.train.MonitoredSession() as session:
hpsi_ = session.run(hpsi)
xs = tf.reverse(xs, axis=[1])
_, hpsi2 = h(log_helium, xs)
with tf.train.MonitoredSession() as session:
hpsi2_ = session.run(hpsi2)
np.testing.assert_allclose(hpsi_[:, 0], hpsi_test, rtol=1.e-6)
np.testing.assert_allclose(hpsi_, hpsi2_, rtol=1.e-6)
class HamiltonianMoleculeTest(tf.test.TestCase):
def setUp(self):
super(HamiltonianMoleculeTest, self).setUp()
self.rh1 = np.array([0, 0, 0], dtype=np.float32)
self.rh2 = np.array([0, 0, 200], dtype=np.float32)
self.rhh = self.rh2 - self.rh1
self.dhh = 1.0 / np.sqrt(np.dot(self.rhh, self.rhh))
self.xs = (2 * np.random.random((6, 3)) - 1).astype(np.float32)
self.xs = [self.xs + self.rh1, self.xs + self.rh2]
self.atoms = [
system.Atom(symbol='H', coords=self.rh1),
system.Atom(symbol='H', coords=self.rh2)
]
def test_hydrogen_molecular_ion(self):
def h2_psi(xs):
# Trial wavefunction as linear combination of H 1s wavefunctions centered
# on each atom.
return _hydrogen(xs - self.rh1) + _hydrogen(xs - self.rh2)
h = hamiltonian.exact_hamiltonian(self.atoms, 1, 0.0)
# H2+; 1 electron. Use points around both nuclei.
xs = tf.constant(np.concatenate(self.xs, axis=0))
psi, hpsi = h(_wfn_to_log(h2_psi), xs)
with tf.train.MonitoredSession() as session:
psi_, hpsi_ = session.run([psi, hpsi])
# Leading order correction to energy is nuclear interaction with the far
# nucleus.
np.testing.assert_allclose(
hpsi_, -(0.5 + self.dhh) * psi_, rtol=self.dhh, atol=self.dhh)
def test_hydrogen_molecule(self):
def h2_psi(xs):
x1, x2 = tf.split(xs, 2, axis=1)
# Essentially a non-interacting system.
return _hydrogen(x1 - self.rh1) * _hydrogen(x2 - self.rh2)
h = hamiltonian.exact_hamiltonian(self.atoms, 2, 0.0)
# H2; 2 electrons. Place one electron around each nucleus.
xs = tf.constant(np.concatenate(self.xs, axis=1))
psi, hpsi = h(_wfn_to_log(h2_psi), xs)
with tf.train.MonitoredSession() as session:
psi_, hpsi_ = session.run([psi, hpsi])
np.testing.assert_allclose(
hpsi_, -(1 + self.dhh) * psi_, rtol=self.dhh, atol=self.dhh)
class R12FeaturesTest(tf.test.TestCase):
def test_r12_features_atom1(self):
atoms = [system.Atom(symbol='H', coords=(0, 0, 0))]
one = np.ones((1, 3))
xs = tf.constant(one, dtype=tf.float32)
xs12 = hamiltonian.r12_features(xs, atoms, 1, flatten=True)
with tf.train.MonitoredSession() as session:
xs12_ = session.run(xs12)
# Should add |x|.
x_test = np.concatenate([[[np.linalg.norm(one[0])]], one], axis=1)
np.testing.assert_allclose(xs12_, x_test)
def test_r12_features_atom2(self):
atoms = [system.Atom(symbol='He', coords=(0, 0, 0))]
one = np.concatenate([np.ones((1, 3)), 0.5 * np.ones((1, 3))], axis=1)
xs = tf.constant(one, dtype=tf.float32)
xs12 = hamiltonian.r12_features(xs, atoms, 2, flatten=True)
with tf.train.MonitoredSession() as session:
xs12_ = session.run(xs12)
# Should add |x_1|, |x_2|, |x1-x2|
norm = np.linalg.norm
x_test = np.concatenate([
[[
norm(one[0, :3]),
norm(one[0, 3:]),
norm(one[0, :3] - one[0, 3:]),
]],
one
],
axis=1)
np.testing.assert_allclose(xs12_, x_test)
def test_r12_features_molecule(self):
atoms = [
system.Atom(symbol='H', coords=(0, 0, 0)),
system.Atom(symbol='H', coords=(1, 0, 0))
]
one = np.concatenate([np.ones((1, 3)), 0.5 * np.ones((1, 3))], axis=1)
xs = tf.constant(one, dtype=tf.float32)
xs12 = hamiltonian.r12_features(xs, atoms, 2, flatten=True)
with tf.train.MonitoredSession() as session:
xs12_ = session.run(xs12)
# Should add |x_11|, |x_21|, |x_12|, |x_22|, |x1-x2|
norm = np.linalg.norm
x_test = np.concatenate([
[[
norm(one[0, :3]),
norm(one[0, :3] - atoms[1].coords),
norm(one[0, 3:]),
norm(one[0, 3:] - atoms[1].coords),
norm(one[0, :3] - one[0, 3:]),
]],
one
],
axis=1)
np.testing.assert_allclose(xs12_, x_test, rtol=1e-6)
if __name__ == '__main__':
tf.test.main()
| ferminet-master | ferminet/tests/hamiltonian_test.py |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ferminet.mcmc."""
from absl.testing import parameterized
from ferminet import hamiltonian
from ferminet import mcmc
from ferminet.utils import system
import numpy as np
import tensorflow.compat.v1 as tf
def _hydrogen(xs):
"""Hydrogen (3D) atom ground state (1s) wavefunction.
Energy: -1/2 hartrees.
Args:
xs: tensor (batch_size, 3) of electron positions.
Returns:
tensor (batch_size, 1) of the (unnormalised) wavefunction at each position.
"""
with tf.name_scope('Psi_H'):
return tf.exp(-tf.norm(xs, axis=1, keepdims=True))
def _helium(xs):
"""Compact parametrized Helium wavefunction.
See https://opencommons.uconn.edu/chem_educ/30/ and Phys Rev A 74, 014501
(2006).
Energy: -2.901188 hartrees (compared to -2.9037243770 hartrees for the exact
ground state).
Args:
xs: tensor (batch_size, 6) of electron positions.
Returns:
tensor (batch_size, 1) of the (unnormalised) wavefunction at each pair of
positions.
"""
with tf.name_scope('Psi_He'):
x1, x2 = tf.split(xs, 2, axis=1)
x1n = tf.norm(x1, axis=1, keepdims=True)
x2n = tf.norm(x2, axis=1, keepdims=True)
s = x1n + x2n
t = x1n - x2n
u = tf.norm(x1 - x2, axis=1, keepdims=True)
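# s, t and u are the Hylleraas coordinates: s = r1 + r2, t = r1 - r2 and
# u = |r1 - r2|. The exp(-2s) prefactor is the product of two He 1s orbitals.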
return (tf.exp(-2*s)
* (1 + 0.5*u*tf.exp(-1.013*u))
* (1 + 0.2119*s*u + 0.1406*t*t - 0.003*u*u))
def _run_mcmc(atoms,
nelectrons,
net,
batch_size=1024,
steps=10,
dtype=tf.float32):
gen = mcmc.MCMC(
net,
batch_size,
[0] * 3 * nelectrons,
1.0,
0.1,
dtype=dtype)
kin, pot = hamiltonian.operators(atoms, nelectrons, 0.0)
walkers = tf.squeeze(gen.walkers)
psi, _ = net(walkers)
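# Local energy of each walker: the kinetic operator acts on log|psi| (the
# first network output) and the potential depends only on the positions.
# Averaging over the batch gives the energy estimate recorded below.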
e_loc = tf.reduce_sum(kin(psi, walkers) + pot(walkers)) / batch_size
e = []
mcmc_step = gen.step()
with tf.train.MonitoredSession() as session:
for _ in range(steps):
session.run(mcmc_step)
e.append(session.run(e_loc))
return np.array(e)
class McmcTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
{'dtype': tf.float64},
)
def test_hydrogen_atom(self, dtype):
atoms = [system.Atom(symbol='H', coords=(0, 0, 0))]
def net(x):
psi = _hydrogen(x)
return (tf.log(tf.abs(psi)), tf.sign(psi))
e = _run_mcmc(atoms, 1, net, dtype=dtype)
np.testing.assert_allclose(e, -np.ones_like(e) / 2)
def test_helium_atom(self):
atoms = [system.Atom(symbol='He', coords=(0, 0, 0))]
def net(x):
psi = _helium(x)
return (tf.log(tf.abs(psi)), tf.sign(psi))
e = _run_mcmc(atoms, 2, net, steps=500)
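# Discard the first 100 steps as burn-in; the remaining samples should average
# to the variational energy of this trial wavefunction (-2.901188 hartrees).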
np.testing.assert_allclose(e[100:].mean(), -2.901188, atol=5.e-3)
if __name__ == '__main__':
tf.test.main()
| ferminet-master | ferminet/tests/mcmc_test.py |
# Lint as: python3
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ferminet.train."""
import os
from absl import flags
from absl.testing import parameterized
from ferminet import train
from ferminet.utils import system
import numpy as np
import pyscf
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
# Default flags are sufficient so mark FLAGS as parsed so we can run the tests
# with py.test, which imports this file rather than runs it.
FLAGS.mark_as_parsed()
class MolVmcTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(MolVmcTest, self).setUp()
# disable use of temp directory in pyscf.
# Test calculations are small enough to fit in RAM and we don't need
# checkpoint files.
pyscf.lib.param.TMPDIR = None
def _get_atoms(self, name, kwargs=None):
configs = {
'h2': lambda _: system.diatomic('H', 'H', bond_length=1.4),
'helium': lambda _: system.atom('He'),
'helium_triplet': lambda _: system.atom('He', spins=(2, 0)),
# Check systems with unequal numbers of electrons
# of each spin. Handle entirely spin-polarised systems.
'hydrogen': lambda _: system.atom('H'),
'lithium': lambda _: system.atom('Li'),
'hn': lambda kwargs: system.hn(r=1.4, **kwargs),
}
return configs[name](kwargs)
@parameterized.parameters(
{
'name': 'h2',
'use_kfac': False
},
{
'name': 'helium',
'use_kfac': False
},
{
'name': 'lithium',
'use_kfac': False
},
{
'name': 'helium_triplet',
'use_kfac': False
},
{
'name': 'hn',
'args': {'n': 3},
'use_kfac': False
},
{
'name': 'hn',
'args': {'n': 3},
'use_kfac': True
},
{
'name': 'hn',
'args': {'n': 5},
'use_kfac': True
},
)
def test_system(self,
name,
args=None,
use_kfac=True):
atoms, electrons = self._get_atoms(name, args)
train.train(
atoms,
electrons,
batch_size=8,
network_config=train.NetworkConfig(determinants=2),
pretrain_config=train.PretrainConfig(iterations=20),
optim_config=train.OptimConfig(iterations=2, use_kfac=use_kfac),
mcmc_config=train.MCMCConfig(steps=2),
logging_config=train.LoggingConfig(
result_path=self.create_tempdir().full_path),
multi_gpu=False)
def test_restart(self):
atoms, electrons = self._get_atoms('lithium')
batch_size = 8
network_config = train.NetworkConfig(
determinants=2, hidden_units=((64, 16),) * 3)
optim_config = train.OptimConfig(iterations=10, use_kfac=False)
result_directory = self.create_tempdir().full_path
# First run
train.train(
atoms,
electrons,
batch_size=batch_size,
network_config=network_config,
pretrain_config=train.PretrainConfig(iterations=20),
optim_config=optim_config,
mcmc_config=train.MCMCConfig(steps=10),
logging_config=train.LoggingConfig(
result_path=os.path.join(result_directory, 'run1')),
multi_gpu=False)
# Restart from first run
# Pretraining and burn-in were done in the first run, so resume training
# immediately.
tf.reset_default_graph()
train.train(
atoms,
electrons,
batch_size=batch_size,
network_config=network_config,
pretrain_config=train.PretrainConfig(iterations=0),
optim_config=optim_config,
mcmc_config=train.MCMCConfig(burn_in=0, steps=10),
logging_config=train.LoggingConfig(
result_path=os.path.join(result_directory, 'run2'),
restore_path=os.path.join(result_directory, 'run1', 'checkpoints')
),
multi_gpu=False)
class AssignElectronsTest(parameterized.TestCase):
def _expected_result(self, molecule, e_per_atom):
nelectrons = [sum(spin_electrons) for spin_electrons in zip(*e_per_atom)]
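# With equal numbers of electrons in each spin channel the assignment is only
# defined up to a global spin flip, so follow the convention that the first
# atom with a spin imbalance is given more spin-up than spin-down electrons.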
if nelectrons[0] == nelectrons[1]:
flip = False
for electrons in e_per_atom:
if electrons[0] < electrons[1]:
flip = True
break
elif electrons[0] > electrons[1]:
flip = False
break
if flip:
e_per_atom = [electrons[::-1] for electrons in e_per_atom]
return np.concatenate(
[np.tile(atom.coords, e[0]) for atom, e in zip(molecule, e_per_atom)] +
[np.tile(atom.coords, e[1]) for atom, e in zip(molecule, e_per_atom)])
@parameterized.parameters(
{
'molecule': [('O', 0, 0, 0)],
'electrons': (5, 3),
'electrons_per_atom': ((5, 3),),
},
{
'molecule': [('O', 0, 0, 0), ('N', 1.2, 0, 0)],
'electrons': (8, 7),
'electrons_per_atom': ((3, 5), (5, 2)),
},
{
'molecule': [('O', 0, 0, 0), ('N', 1.2, 0, 0)],
'electrons': (7, 8),
'electrons_per_atom': ((5, 3), (2, 5)),
},
{
'molecule': [('O', 0, 0, 0), ('N', 1.2, 0, 0)],
'electrons': (9, 7),
'electrons_per_atom': ((4, 5), (5, 2)),
},
{
'molecule': [('O', 0, 0, 0), ('N', 1.2, 0, 0)],
'electrons': (10, 7),
'electrons_per_atom': ((5, 5), (5, 2)),
},
{
'molecule': [('O', 0, 0, 0), ('N', 1.2, 0, 0)],
'electrons': (7, 7),
'electrons_per_atom': ((5, 3), (2, 4)),
},
{
'molecule': [('O', 0, 0, 0), ('O', 1.2, 0, 0)],
'electrons': (9, 7),
'electrons_per_atom': ((4, 4), (5, 3)),
},
{
'molecule': [('O', 0, 0, 0), ('O', 20, 0, 0)],
'electrons': (10, 6),
'electrons_per_atom': ((5, 3), (5, 3)),
},
{
'molecule': [('H', 0, 0, 0), ('H', 1.2, 0, 0)],
'electrons': (1, 1),
'electrons_per_atom': ((1, 0), (0, 1)),
},
{
'molecule': [('B', 0, 0, 0), ('H', 1.2, 0, 0), ('H', -0.6, 0.6, 0),
('H', -0.6, -0.6, 0)],
'electrons': (4, 4),
'electrons_per_atom': ((3, 2), (0, 1), (1, 0), (0, 1)),
},
{
'molecule': [('B', 0, 0, 0), ('H', 1.2, 0, 0)],
'electrons': (3, 2),
'electrons_per_atom': ((3, 2), (0, 0)),
},
{
'molecule': [('B', 0, 0, 0), ('H', 1.2, 0, 0), ('H', -0.6, 0.6, 0)],
'electrons': (3, 2),
'electrons_per_atom': ((3, 2), (0, 0), (0, 0)),
},
)
def test_assign_electrons(self, molecule, electrons, electrons_per_atom):
molecule = [system.Atom(v[0], v[1:]) for v in molecule]
e_means = train.assign_electrons(molecule, electrons)
expected_e_means = self._expected_result(molecule, electrons_per_atom)
np.testing.assert_allclose(e_means, expected_e_means)
e_means = train.assign_electrons(molecule[::-1], electrons)
if any(atom.symbol != molecule[0].symbol for atom in molecule):
expected_e_means = self._expected_result(molecule[::-1],
electrons_per_atom[::-1])
else:
expected_e_means = self._expected_result(molecule[::-1],
electrons_per_atom)
np.testing.assert_allclose(e_means, expected_e_means)
if __name__ == '__main__':
tf.test.main()
| ferminet-master | ferminet/tests/train_test.py |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ferminet.scf."""
from absl.testing import parameterized
from ferminet import scf
from ferminet.utils import system
import numpy as np
import pyscf
import tensorflow.compat.v1 as tf
def create_o2_hf(bond_length):
molecule = [
system.Atom('O', (0, 0, 0)),
system.Atom('O', (0, 0, bond_length))
]
spin = 2
oxygen_atomic_number = 8
nelectrons = [oxygen_atomic_number + spin, oxygen_atomic_number - spin]
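# Unrestricted Hartree-Fock calculation for O2 with 2 * spin more spin-up than
# spin-down electrons.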
hf = scf.Scf(molecule=molecule, nelectrons=nelectrons, restricted=False)
return hf
class ScfTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(ScfTest, self).setUp()
# disable use of temp directory in pyscf.
# Test calculations are small enough to fit in RAM and we don't need
# checkpoint files.
pyscf.lib.param.TMPDIR = None
@parameterized.parameters(np.float32, np.float64)
def test_tf_eval_mos(self, dtype):
"""Tests tensorflow op for evaluating Hartree-Fock orbitals.
Args:
dtype: numpy type to use for position vectors.
"""
xs = np.random.randn(100, 3).astype(dtype)
hf = create_o2_hf(1.4)
hf.run()
# Evaluate MOs at positions xs directly in pyscf.
# hf.eval_mos returns np.float64 arrays always. Cast to dtype for
# like-for-like comparison.
mo_vals = [mo_val.astype(dtype) for mo_val in hf.eval_mos(xs, deriv=True)]
# Evaluate MOs as a tensorflow op.
tf_xs = tf.constant(xs)
tf_mo_vals = hf.tf_eval_mos(tf_xs, deriv=True)
tf_dmo_vals = tf.gradients(tf.square(tf_mo_vals), tf_xs)[0]
tf_dmo_vals_shape = tf.shape(tf_dmo_vals)
with tf.train.MonitoredSession() as session:
tf_mo_vals_, tf_dmo_vals_ = session.run([tf_mo_vals, tf_dmo_vals])
tf_dmo_vals_shape_ = session.run(tf_dmo_vals_shape)
tf_mo_vals_ = np.split(tf_mo_vals_, 2, axis=-1)
for np_val, tf_val in zip(mo_vals, tf_mo_vals_):
self.assertEqual(np_val.dtype, tf_val.dtype)
np.testing.assert_allclose(np_val[0], tf_val)
np.testing.assert_array_equal(tf_dmo_vals_shape_, xs.shape)
np.testing.assert_array_equal(tf_dmo_vals_shape_, tf_dmo_vals_.shape)
# Compare analytic derivative of orbital^2 with tensorflow backprop.
# Sum over the orbital index and spin channel because of definition of
# tensorflow gradients.
np_deriv = sum(
np.sum(2 * np_val[1:] * np.expand_dims(np_val[0], 0), axis=-1)
for np_val in mo_vals)
# Tensorflow returns (N,3) but pyscf returns (3, N, M).
np_deriv = np.transpose(np_deriv)
self.assertEqual(np_deriv.dtype, tf_dmo_vals_.dtype)
if dtype == np.float32:
atol = 1e-6
rtol = 1e-6
else:
atol = 0
rtol = 1.e-7
np.testing.assert_allclose(np_deriv, tf_dmo_vals_, atol=atol, rtol=rtol)
def test_tf_eval_mos_deriv(self):
hf = create_o2_hf(1.4)
hf.run()
xs = tf.random_normal((100, 3))
tf_mos_derivs = hf.tf_eval_mos(xs, deriv=True)
tf_mos = hf.tf_eval_mos(xs, deriv=False)
with tf.train.MonitoredSession() as session:
tf_mos_, tf_mos_derivs_ = session.run([tf_mos, tf_mos_derivs])
np.testing.assert_allclose(tf_mos_, tf_mos_derivs_)
def test_tf_eval_hf(self):
# Check we get consistent answers between multiple calls to eval_mos and
# a single tf_eval_hf call.
molecule = [system.Atom('O', (0, 0, 0))]
nelectrons = (5, 3)
hf = scf.Scf(molecule=molecule,
nelectrons=nelectrons,
restricted=False)
hf.run()
batch = 100
xs = [np.random.randn(batch, 3) for _ in range(sum(nelectrons))]
mos = []
for i, x in enumerate(xs):
ispin = 0 if i < nelectrons[0] else 1
orbitals = hf.eval_mos(x)[ispin]
# Select occupied orbitals via Aufbau.
mos.append(orbitals[:, :nelectrons[ispin]])
np_mos = (np.stack(mos[:nelectrons[0]], axis=1),
np.stack(mos[nelectrons[0]:], axis=1))
tf_xs = tf.constant(np.stack(xs, axis=1))
tf_mos = hf.tf_eval_hf(tf_xs, deriv=True)
with tf.train.MonitoredSession() as session:
tf_mos_ = session.run(tf_mos)
for i, (np_mos_mat, tf_mos_mat) in enumerate(zip(np_mos, tf_mos_)):
self.assertEqual(np_mos_mat.shape, tf_mos_mat.shape)
self.assertEqual(np_mos_mat.shape, (batch, nelectrons[i], nelectrons[i]))
np.testing.assert_allclose(np_mos_mat, tf_mos_mat)
def test_tf_eval_slog_wavefuncs(self):
# Check TensorFlow evaluation runs and gives correct shapes.
molecule = [system.Atom('O', (0, 0, 0))]
nelectrons = (5, 3)
total_electrons = sum(nelectrons)
num_spatial_dim = 3
hf = scf.Scf(molecule=molecule,
nelectrons=nelectrons,
restricted=False)
hf.run()
batch = 100
rng = np.random.RandomState(1)
flat_positions_np = rng.randn(batch,
total_electrons * num_spatial_dim)
flat_positions_tf = tf.constant(flat_positions_np)
for method in [hf.tf_eval_slog_slater_determinant,
hf.tf_eval_slog_hartree_product]:
slog_wavefunc, signs = method(flat_positions_tf)
with tf.train.MonitoredSession() as session:
slog_wavefunc_, signs_ = session.run([slog_wavefunc, signs])
self.assertEqual(slog_wavefunc_.shape, (batch, 1))
self.assertEqual(signs_.shape, (batch, 1))
hartree_product = hf.tf_eval_hartree_product(flat_positions_tf)
with tf.train.MonitoredSession() as session:
hartree_product_ = session.run(hartree_product)
np.testing.assert_allclose(hartree_product_,
np.exp(slog_wavefunc_) * signs_)
if __name__ == '__main__':
tf.test.main()
| ferminet-master | ferminet/tests/scf_test.py |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ferminet.networks."""
from absl.testing import parameterized
from ferminet import networks
from ferminet import scf
from ferminet.utils import system
import numpy as np
import pyscf
import tensorflow.compat.v1 as tf
class NetworksTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(NetworksTest, self).setUp()
# disable use of temp directory in pyscf.
# Test calculations are small enough to fit in RAM and we don't need
# checkpoint files.
pyscf.lib.param.TMPDIR = None
@parameterized.parameters(
{
'hidden_units': [8, 8],
'after_det': [8, 8, 2],
},
{
'hidden_units': [[8, 8], [8, 8]],
'after_det': [8, 8, 2],
},
)
def test_ferminet(self, hidden_units, after_det):
"""Check that FermiNet is actually antisymmetric."""
atoms = [
system.Atom(symbol='H', coords=(0, 0, -1.0)),
system.Atom(symbol='H', coords=(0, 0, 1.0)),
]
ne = (3, 2)
x1 = tf.random_normal([3, 3 * sum(ne)])
xs = tf.split(x1, sum(ne), axis=1)
# swap indices to test antisymmetry
x2 = tf.concat([xs[1], xs[0]] + xs[2:], axis=1)
x3 = tf.concat(
xs[:ne[0]] + [xs[ne[0] + 1], xs[ne[0]]] + xs[ne[0] + 2:], axis=1)
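# x2 swaps the first two spin-up electrons and x3 swaps the first two
# spin-down electrons; both permutations should flip the sign of the output.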
ferminet = networks.FermiNet(
atoms=atoms,
nelectrons=ne,
slater_dets=4,
hidden_units=hidden_units,
after_det=after_det)
y1 = ferminet(x1)
y2 = ferminet(x2)
y3 = ferminet(x3)
with tf.train.MonitoredSession() as session:
out1, out2, out3 = session.run([y1, y2, y3])
np.testing.assert_allclose(out1, -out2, rtol=4.e-5, atol=1.e-6)
np.testing.assert_allclose(out1, -out3, rtol=4.e-5, atol=1.e-6)
def test_ferminet_mask(self):
"""Check that FermiNet with a decaying mask on the output works."""
atoms = [
system.Atom(symbol='H', coords=(0, 0, -1.0)),
system.Atom(symbol='H', coords=(0, 0, 1.0)),
]
ne = (3, 2)
hidden_units = [[8, 8], [8, 8]]
after_det = [8, 8, 2]
x = tf.random_normal([3, 3 * sum(ne)])
ferminet = networks.FermiNet(
atoms=atoms,
nelectrons=ne,
slater_dets=4,
hidden_units=hidden_units,
after_det=after_det,
envelope=True)
y = ferminet(x)
with tf.train.MonitoredSession() as session:
session.run(y)
@parameterized.parameters(1, 3)
def test_ferminet_size(self, size):
atoms = [
system.Atom(symbol='H', coords=(0, 0, -1.0)),
system.Atom(symbol='H', coords=(0, 0, 1.0)),
]
ne = (size, size)
nout = 1
batch_size = 3
ndet = 4
hidden_units = [[8, 8], [8, 8]]
after_det = [8, 8, nout]
x = tf.random_normal([batch_size, 3 * sum(ne)])
ferminet = networks.FermiNet(
atoms=atoms,
nelectrons=ne,
slater_dets=ndet,
hidden_units=hidden_units,
after_det=after_det)
y = ferminet(x)
for i in range(len(ne)):
self.assertEqual(ferminet._dets[i].shape.as_list(), [batch_size, ndet])
self.assertEqual(ferminet._orbitals[i].shape.as_list(),
[batch_size, ndet, size, size])
self.assertEqual(y.shape.as_list(), [batch_size, nout])
@parameterized.parameters(
{
'hidden_units': [8, 8],
'after_det': [8, 8, 2]
},
{
'hidden_units': [[8, 8], [8, 8]],
'after_det': [8, 8, 2]
},
)
def test_ferminet_pretrain(self, hidden_units, after_det):
"""Check that FermiNet pretraining runs."""
atoms = [
system.Atom(symbol='Li', coords=(0, 0, -1.0)),
system.Atom(symbol='Li', coords=(0, 0, 1.0)),
]
ne = (4, 2)
x = tf.random_normal([1, 10, 3 * sum(ne)])
strategy = tf.distribute.get_strategy()
with strategy.scope():
ferminet = networks.FermiNet(
atoms=atoms,
nelectrons=ne,
slater_dets=4,
hidden_units=hidden_units,
after_det=after_det,
pretrain_iterations=10)
# Test Hartree-Fock pretraining - no change of position.
hf_approx = scf.Scf(atoms, nelectrons=ne)
hf_approx.run()
pretrain_op_hf = networks.pretrain_hartree_fock(ferminet, x, strategy,
hf_approx)
self.assertEqual(ferminet.pretrain_iterations, 10)
with tf.train.MonitoredSession() as session:
for _ in range(ferminet.pretrain_iterations):
session.run(pretrain_op_hf)
class LogDetMatmulTest(tf.test.TestCase, parameterized.TestCase):
def _make_mats(self, n, m, l1, l2, k, singular=False):
x1 = np.random.randn(n, m, l1, l1).astype(np.float32)
if singular:
# Make one matrix singular
x1[0, 0, 0] = 2.0 * x1[0, 0, 1]
x2 = np.random.randn(n, m, l2, l2).astype(np.float32)
w = np.random.randn(m, k).astype(np.float32)
x1_tf = tf.constant(x1)
x2_tf = tf.constant(x2)
w_tf = tf.constant(w)
output = networks.logdet_matmul(x1_tf, x2_tf, w_tf)
return x1, x2, w, x1_tf, x2_tf, w_tf, output
def _cofactor(self, x):
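# Reference cofactor matrix computed via the SVD: each singular value is
# replaced by the product of all the other singular values, which remains
# well defined even when x is singular.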
u, s, v = np.linalg.svd(x)
ss = np.tile(s[..., None], [1, s.shape[0]])
np.fill_diagonal(ss, 1.0)
z = np.prod(ss, axis=0)
return np.dot(u, np.dot(np.diag(z), v)).transpose()
@parameterized.parameters(True, False)
def test_logdet_matmul(self, singular):
n = 4
m = 5
l1 = 3
l2 = 4
k = 2
x1, x2, w, _, _, _, output = self._make_mats(
n, m, l1, l2, k, singular=singular)
with tf.Session() as session:
logout, signout = session.run(output)
tf_result = np.exp(logout) * signout
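# Reference result computed with numpy:
# output[i, k] = sum_j det(x1[i, j]) * det(x2[i, j]) * w[j, k].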
det1 = np.zeros([n, m], dtype=np.float32)
det2 = np.zeros([n, m], dtype=np.float32)
for i in range(n):
for j in range(m):
det1[i, j] = np.linalg.det(x1[i, j])
det2[i, j] = np.linalg.det(x2[i, j])
np_result = np.dot(det1 * det2, w)
np.testing.assert_allclose(np_result, tf_result, atol=1e-5, rtol=1e-5)
@parameterized.parameters(True, False)
def test_logdet_matmul_grad(self, singular):
n = 4
m = 5
l1 = 3
l2 = 4
k = 2
result = self._make_mats(n, m, l1, l2, k, singular=singular)
x1, x2, w, x1_tf, x2_tf, w_tf, output = result
with tf.Session():
theor_x1, numer_x1 = tf.test.compute_gradient(
x1_tf, [n, m, l1, l1], output[0], [n, k], x_init_value=x1)
np.testing.assert_allclose(theor_x1, numer_x1, atol=1e-2, rtol=1e-3)
theor_x2, numer_x2 = tf.test.compute_gradient(
x2_tf, [n, m, l2, l2], output[0], [n, k], x_init_value=x2)
np.testing.assert_allclose(theor_x2, numer_x2, atol=1e-2, rtol=1e-3)
theor_w, numer_w = tf.test.compute_gradient(
w_tf, [m, k], output[0], [n, k], x_init_value=w)
np.testing.assert_allclose(theor_w, numer_w, atol=1e-2, rtol=1e-3)
@parameterized.parameters(True, False)
def test_logdet_matmul_grad_grad(self, singular):
n = 2
m = 3
l1 = 2
l2 = 3
k = 2
result = self._make_mats(n, m, l1, l2, k, singular=singular)
x1, x2, w, x1_tf, x2_tf, w_tf, output = result
glog = np.random.randn(*(output[0].shape.as_list())).astype(np.float32)
glog_tf = tf.constant(glog)
grad_op = tf.gradients(output[0], [x1_tf, x2_tf, w_tf], grad_ys=glog_tf)
inp_op = [x1_tf, x2_tf, w_tf, glog_tf]
inp_np = [x1, x2, w, glog]
with tf.Session():
for i in range(len(inp_op)):
for j in range(len(grad_op)):
theor, numer = tf.test.compute_gradient(
inp_op[i],
inp_op[i].shape,
grad_op[j],
grad_op[j].shape,
x_init_value=inp_np[i])
np.testing.assert_allclose(theor, numer, atol=1e-2, rtol=1.3e-3)
class AdjugateGradTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(2, False),
(2, True),
(8, False),
(8, True),
)
def test_grad_adj(self, dim, singular):
def cofactor_tf(xs):
s, u, v = tf.linalg.svd(xs)
return networks.cofactor(u, s, v)
@tf.custom_gradient
def cofactor_tf_custom(xs):
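# Same cofactor computation, but using the closed-form gradient from
# networks.grad_cofactor instead of backpropagating through the SVD.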
s, u, v = tf.linalg.svd(xs)
def grad(dy):
r = networks.rho(s, 0.0)
return networks.grad_cofactor(u, v, r, dy)
return networks.cofactor(u, s, v), grad
n, m = 6, 3
x = np.random.randn(n, m, dim, dim).astype(np.float32)
if singular:
# Make one matrix singular
x[0, 0, 0] = 2.0 * x[0, 0, 1]
x_tf = tf.constant(x)
adj1 = cofactor_tf(x_tf)
grad_adj_backprop = tf.gradients(adj1, x_tf)
adj2 = cofactor_tf_custom(x_tf)
grad_adj_closed = tf.gradients(adj2, x_tf)
with tf.train.MonitoredSession() as session:
backprop, closed = session.run([grad_adj_backprop, grad_adj_closed])
np.testing.assert_allclose(backprop, closed, atol=1.e-5, rtol=1.e-3)
class GammaTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
((5,), 0),
((5,), 2),
((12, 6, 5), 0),
((12, 1, 8), 2),
((12, 8, 8), 2),
)
def test_gamma(self, shape, nsingular):
s = tf.Variable(tf.random_uniform(shape))
if nsingular > 0:
zeros = list(shape)
zeros[-1] = nsingular
s = s[..., shape[-1] - nsingular:].assign(tf.zeros(zeros))
g = networks.gamma(s)
with tf.train.MonitoredSession() as session:
s_, g_ = session.run([s, g])
s_flat = np.reshape(s_, (-1, shape[-1]))
gamma_exact = np.ones_like(s_flat)
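# Exact reference: gamma_j = prod_{i != j} s_i, computed by temporarily
# setting s_j to 1 before taking the product.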
for i, row in enumerate(s_flat):
for j in range(shape[-1]):
row[j], r_j = 1.0, row[j]
gamma_exact[i, j] = np.prod(row)
row[j] = r_j
gamma_exact = np.reshape(gamma_exact, g_.shape)
np.testing.assert_allclose(g_, gamma_exact, atol=1.e-7)
class RhoTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(None, 2, 0),
(None, 5, 0),
(None, 5, 2),
((8, 4), 5, 2),
)
def test_rho(self, tile, dim, nsingular):
assert dim > 0
assert dim > nsingular
s = tf.Variable(tf.random_uniform((dim,)))
s = s[dim - nsingular:].assign(tf.zeros(nsingular))
s_batch = s
if tile:
for _ in range(len(tile)):
s_batch = tf.expand_dims(s_batch, 0)
s_batch = tf.tile(s_batch, list(tile) + [1])
r = networks.rho(s)
r_batch = networks.rho(s_batch)
with tf.train.MonitoredSession() as session:
s_, r_, r_batch_ = session.run([s, r, r_batch])
rho_exact = np.zeros((dim, dim), dtype=np.float32)
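# Exact reference: rho_ij = prod_{k != i, j} s_k for i != j, computed by
# temporarily setting s_i and s_j to 1; the result is symmetrised afterwards.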
for i in range(dim - 1):
s_[i], s_i = 1.0, s_[i]
for j in range(i + 1, dim):
s_[j], s_j = 1.0, s_[j]
rho_exact[i, j] = np.prod(s_)
s_[j] = s_j
s_[i] = s_i
rho_exact = rho_exact + np.transpose(rho_exact)
atol = 1e-5
rtol = 1e-5
np.testing.assert_allclose(r_, rho_exact, atol=atol, rtol=rtol)
if tile:
r_batch_ = np.reshape(r_batch_, (-1, dim, dim))
for i in range(r_batch_[0].shape[0]):
np.testing.assert_allclose(r_batch_[i], rho_exact, atol=atol, rtol=rtol)
if __name__ == '__main__':
tf.test.main()
| ferminet-master | ferminet/tests/networks_test.py |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for reading and analysing QMC data."""
from typing import Iterable, Optional, Text, Union
from absl import logging
import numpy as np
import pandas as pd
from pyblock import pd_utils as blocking
def _format_network(network_option: Union[int, Iterable[int]]) -> Text:
"""Formats a network configuration to a (short) string.
Args:
network_option: an integer or an iterable of integers.
Returns:
String representation of the network option. If the network option is an
iterable of the form [V, V, ...], return NxV, where N is the length of the
iterable.
"""
try:
# pytype doesn't handle try...except TypeError gracefully.
if all(xi == network_option[0] for xi in network_option[1:]): # pytype: disable=unsupported-operands
return '{}x{}'.format(len(network_option), network_option[0]) # pytype: disable=unsupported-operands,wrong-arg-types
else:
return '{}'.format(network_option)
except TypeError:
return '{}'.format(network_option)
def estimate_stats(df: pd.DataFrame,
burn_in: int,
groups: Optional[Iterable[Text]] = None,
group_by_work_unit: bool = True) -> pd.DataFrame:
"""Estimates statistics for the (local) energy.
Args:
df: pd.DataFrame containing local energy data in the 'eigenvalues' column.
burn_in: number of data points to discard before accumulating statistics to
allow for learning and equilibration time.
groups: list of column names in df to group by. The statistics for each
group are returned, along with the corresponding data for the group. The
group columns should be sufficient to distinguish between separate work
units/calculations in df.
group_by_work_unit: add 'work_unit_id' to the list of groups if not already
present and 'work_unit_id' is a column in df. This is usually helpful
for safety, when each work unit is a separate calculation and should be
treated separately statistically.
Returns:
pandas DataFrame containing estimates of the mean, standard error and error
in the standard error from a blocking analysis of the local energy for each
group in df.
Raises:
RuntimeError: If groups is empty or None and group_by_work_unit is False. If
df does not contain a key to group over, insert a dummy column with
identical values or use pyblock directly.
"""
wid = 'work_unit_id'
if groups is None:
groups = []
else:
groups = list(groups)
if group_by_work_unit and wid not in groups and wid in df.columns:
groups.append(wid)
if not groups:
raise RuntimeError(
'Must group by at least one key or set group_by_work_unit to True.')
if len(groups) == 1:
index_dict = {'index': groups[0]}
else:
index_dict = {'level_{}'.format(i): group for i, group in enumerate(groups)}
stats_dict = {
'mean': 'energy',
'standard error': 'stderr',
'standard error error': 'stderrerr'
}
def block(key, values):
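# Run a blocking analysis on the serially correlated local energies and
# return the statistics at the automatically chosen block size; fall back to
# NaNs if the reblocking does not suggest a block size.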
blocked = blocking.reblock_summary(blocking.reblock(values)[1])
if not blocked.empty:
return blocked.iloc[0]
else:
logging.warning('Reblocking failed to estimate statistics for %s.', key)
return pd.Series({statistic: np.nan for statistic in stats_dict})
stats = (
pd.DataFrame.from_dict({
n: block(n, d.eigenvalues[burn_in:])
for n, d in df.groupby(groups) if not d[burn_in:].eigenvalues.empty
}, orient='index')
.reset_index()
.rename(index_dict, axis=1)
.rename(stats_dict, axis=1)
)
stats = stats.sort_values(by=groups).reset_index(drop=True)
stats['burn_in'] = burn_in
return stats
| ferminet-master | ferminet/utils/analysis_tools.py |