#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Optional
import torch
from flsim.channels.base_channel import IdentityChannel
from flsim.common.logger import Logger
from flsim.interfaces.model import IFLModel
from flsim.privacy.common import PrivacyBudget
from flsim.reducers.base_round_reducer import ReductionType, RoundReducerConfig
from flsim.reducers.dp_round_reducer import DPRoundReducerConfig
from flsim.reducers.weighted_dp_round_reducer import WeightedDPRoundReducerConfig
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.fl.stats import ModelSequenceNumberTracker
from hydra.utils import instantiate
from omegaconf import MISSING
from torch.nn import Module as Model # @manual
class AsyncAggregationType(Enum):
r"""
There are two ways in which we can aggregate local model updates
into the global model in AsyncFL.
Assume we're trying to aggregate updates from userA
local_model_before_training = model sent to userA by server
final_local_model = model after local training by userA
global_model = model on server when userA finishes training
new_global_model = global_model - delta*lr
The difference is in how `delta` is computed
a) fed_buff_aggregation: delta = local_model_before_training - final_local_model
b) fed_async_aggregation: delta = global_model - final_local_model
Literature uses fed_async_aggregation (https://arxiv.org/abs/1903.03934)
We find empirically that fed_buff_aggregation performs better
"""
fed_buff_aggregation = 1
fed_async_aggregation = 2
@staticmethod
def from_str(name: str):
name_lower = name.lower()
names = [e.name for e in AsyncAggregationType]
assert name_lower in names, "Unknown async aggregation type: " + name
return AsyncAggregationType[name_lower]
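# Illustrative sketch (not part of the original FLSim source): a scalar example of how
# the two aggregation types above compute `delta`. The function name and the numeric
# values are hypothetical and exist only to make the docstring concrete.
def _example_delta(aggregation_type: "AsyncAggregationType") -> float:
    local_model_before_training = 1.0  # model sent to userA by the server
    final_local_model = 0.4            # model after userA's local training
    global_model = 0.8                 # server model when userA finishes training
    if aggregation_type == AsyncAggregationType.fed_buff_aggregation:
        return local_model_before_training - final_local_model  # delta = 0.6
    return global_model - final_local_model  # fed_async_aggregation: delta = 0.4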
class AsyncAggregator:
r"""
Implements Asynchronous Federated Learning
Input: local_model_before_training, final_local_model, global_model, wt
Output: global_model = global_model - lr*delta, where
delta = (local_model_before_training - final_local_model)
lr: original_lr*wt (higher wt = higher learning rate)
original_lr: lr given in config.
smaller original_lr: small changes in global_model
higher original_lr: large changes in global_model
Eg: lr=1 means keep global_model=final_local_model (when wt=1)
TODO: add adaptive learning rate based on staleness of gradient
Attributes:
Internal attributes:
optimizer (torch.optim.Optimizer): an optimizer kept
internally to prevent optimizer creation on every epoch
"""
logger: logging.Logger = Logger.get_logger("AsyncAggregator")
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IdentityChannel] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=AsyncAggregatorConfig,
**kwargs,
)
self.optimizer = create_optimizer_for_async_aggregator(
# pyre-fixme[16]: `AsyncAggregator` has no attribute `cfg`.
self.cfg,
global_model.fl_get_module(),
)
if self.cfg.aggregation_type not in [
AsyncAggregationType.fed_buff_aggregation,
AsyncAggregationType.fed_async_aggregation,
]:
raise AssertionError(
f"Unknown AsyncAggregationType:{self.cfg.aggregation_type}"
)
self.orig_lr = self.optimizer.param_groups[0]["lr"]
self._global_model: IFLModel = global_model
self._reconstructed_grad: IFLModel = FLModelParamUtils.clone(self._global_model)
# there is no concept of a round in async, hence round reducer is not tied to a round
self.reducer = instantiate(
self.cfg.reducer,
global_model=self._global_model,
channel=channel,
num_users_per_round=self.cfg.num_users_per_round,
total_number_of_users=self.cfg.total_number_of_users,
)
self.seqnum_tracker = ModelSequenceNumberTracker()
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@property
def global_model(self) -> IFLModel:
return self._global_model
def set_num_total_users(self, num_total_users: int) -> None:
self.reducer.set_num_total_users(num_total_users)
def model_staleness(self, model_seqnum: int):
seqnum_diff = self.seqnum_tracker.get_staleness_and_update_stats(model_seqnum)
return seqnum_diff
def zero_grad(self):
r"""
Reset the reducer and optimizer to prepare to update
the global model
"""
self.reducer.reset(ref_model=self._global_model)
self.optimizer.zero_grad()
@property
def privacy_budget(self) -> Optional[PrivacyBudget]:
return self.reducer.privacy_budget if self.is_private else None
@property
def global_seqnum(self):
return self.seqnum_tracker.current_seqnum
@property
def is_private(self):
return self.cfg.reducer._target_ in {
DPRoundReducerConfig._target_,
WeightedDPRoundReducerConfig._target_,
}
def on_client_training_end(
self,
client_delta: IFLModel,
final_local_model: IFLModel,
weight: float,
) -> bool:
"""
Collects the client update and updates the global model
Note:
Async updates the global model on every client, hence the return value is always True
Args:
client_delta (IFLModel): the difference between the client's model before and after local training
final_local_model (IFLModel): client's model after local training
weight (float): client's update weight
Returns:
bool: whether or not the global model was updated
"""
client_delta = self.reducer.receive_through_channel(client_delta)
# pyre-fixme[16]: `AsyncAggregator` has no attribute `cfg`.
if self.cfg.aggregation_type == AsyncAggregationType.fed_buff_aggregation:
FLModelParamUtils.set_gradient(
model=self._global_model.fl_get_module(),
reference_gradient=client_delta.fl_get_module(),
)
else:
FLModelParamUtils.subtract_model(
minuend=self._global_model.fl_get_module(),
subtrahend=final_local_model.fl_get_module(),
difference=self._reconstructed_grad.fl_get_module(),
)
FLModelParamUtils.set_gradient(
model=self._global_model.fl_get_module(),
reference_gradient=self._reconstructed_grad.fl_get_module(),
)
self._step_with_modified_lr(lr_normalizer=weight)
return True
def on_training_epoch_end(self) -> bool:
"""
Base method for end of training epoch
Returns whether some client updates were aggregated into global model
(In k-async, up to (k-1) client updates may not have been aggregated
at the end of the epoch, and need to be handled explicitly)
"""
pass
def _step_with_modified_lr(self, lr_normalizer: float):
r"""
Updates the learning rate based on the weight and
increments global seqnum
original_lr: lr given in config
lr: original_lr * weight (higher weight = higher learning rate)
smaller original_lr: small changes in global_model
higher original_lr: large changes in global_model
Eg: lr=1 means keep global_model=final_local_model (when wt=1)
"""
# TODO: for optimizers that don't use momentum or adaptive learning rate,
# scaling LR directly is optimal
# For optimizers like Adam, or SGD+Momentum, add optimal weight-based scaling
# (open research problem)
for param_group in self.optimizer.param_groups:
param_group["lr"] = self.orig_lr * lr_normalizer
self.optimizer.step()
self.seqnum_tracker.increment()
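# Illustrative sketch (not part of the original FLSim source): for a plain-SGD optimizer
# with no momentum, _step_with_modified_lr above amounts to the scalar update below.
# The function name and its arguments are hypothetical.
def _example_scaled_sgd_step(
    global_param: float, delta: float, original_lr: float, weight: float
) -> float:
    effective_lr = original_lr * weight  # higher weight -> larger step
    # e.g. original_lr=1.0 and weight=1.0 applies the client delta in full
    return global_param - effective_lr * delta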
class FedAdamAsyncAggregator(AsyncAggregator):
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IdentityChannel] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__,
config_class=FedAdamAsyncAggregatorConfig,
**kwargs,
)
super().__init__(global_model=global_model, channel=channel, **kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
class FedAvgWithLRAsyncAggregator(AsyncAggregator):
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IdentityChannel] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__,
config_class=FedAvgWithLRAsyncAggregatorConfig,
**kwargs,
)
super().__init__(global_model=global_model, channel=channel, **kwargs)
class FedAvgWithLRWithMomentumAsyncAggregator(FedAvgWithLRAsyncAggregator):
"""Implements the correct version of momentum"""
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IdentityChannel] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__,
config_class=FedAvgWithLRWithMomentumAsyncAggregatorConfig,
**kwargs,
)
super().__init__(global_model=global_model, channel=channel, **kwargs)
self.orig_momentum = self.optimizer.param_groups[0]["momentum"]
assert (
# pyre-fixme[16]: `FedAvgWithLRWithMomentumAsyncAggregator` has no attribute
# `cfg`.
self.cfg.aggregation_type
== AsyncAggregationType.fed_buff_aggregation
), "Only delta direction is supported by "
f"{FedAvgWithLRWithMomentumAsyncAggregatorConfig.__class__.__name__}"
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def on_client_training_end(
self,
client_delta: IFLModel,
final_local_model: IFLModel,
weight: float,
) -> bool:
# TODO: better if this assert fires at config creation time
assert (
weight >= 0 and weight <= 1
), f"{FedAvgWithLRWithMomentumAsyncAggregatorConfig.__class__.__name__}"
"only supports weights between 0 and 1"
# scale delta by weight
FLModelParamUtils.multiply_model_by_weight(
model=client_delta.fl_get_module(),
weight=weight,
model_to_save=self._reconstructed_grad.fl_get_module(),
)
FLModelParamUtils.set_gradient(
model=self._global_model.fl_get_module(),
reference_gradient=self._reconstructed_grad.fl_get_module(),
)
# change momentum parameter
momentum = self.orig_momentum + ((1 - self.orig_momentum) * (1 - weight))
for param_group in self.optimizer.param_groups:
param_group["momentum"] = momentum
self._step_with_modified_lr(lr_normalizer=weight)
return True
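# Illustrative sketch (not part of the original FLSim source): how the momentum rescaling
# above behaves at the extremes. With weight=1 the optimizer keeps the configured momentum;
# with weight=0 the effective momentum becomes 1, so the SGD velocity buffer is carried
# forward unchanged and a zero-weight update is effectively a no-op. Values are hypothetical.
def _example_effective_momentum(orig_momentum: float, weight: float) -> float:
    return orig_momentum + (1.0 - orig_momentum) * (1.0 - weight)


assert abs(_example_effective_momentum(0.9, 1.0) - 0.9) < 1e-9  # full-weight update
assert abs(_example_effective_momentum(0.9, 0.0) - 1.0) < 1e-9  # zero-weight update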
class FedBuffAggregator(AsyncAggregator):
r"""
Aggregator for FedBuff (Buffered Asynchronous Aggregation)
Keeps track of the number of clients reported and takes a global step after reaching the
threshold set by the config
"""
logger: logging.Logger = Logger.get_logger("FedBuffAggregator")
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IdentityChannel] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__,
config_class=FedBuffAggregatorConfig,
**kwargs,
)
super().__init__(global_model=global_model, channel=channel, **kwargs)
# pyre-fixme[16]: `FedBuffAggregator` has no attribute `cfg`.
if self.cfg.aggregation_type != AsyncAggregationType.fed_buff_aggregation:
raise ValueError("Buffered Aggregator only supports delta direction")
self.num_clients_reported = 0
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def zero_grad(self):
r"""
Zeroes out the grads if this is the first update or if
buffer_size has been reached; otherwise a no-op
"""
if self.num_clients_reported == 0 or self.should_update_global_model():
super().zero_grad()
def on_client_training_end(
self,
client_delta: IFLModel,
final_local_model: IFLModel,
weight: float,
) -> bool:
"""
Collects the client update and updates the global model once buffer_size is reached
Args:
client_delta (IFLModel): the difference between the client's model before and after local training
final_local_model (IFLModel): client's model after local training
weight (float): client's update weight
Returns:
bool: whether or not the global model was updated
"""
# total_delta += delta
self._collect_client_update(update=client_delta, weight=weight)
if self.should_update_global_model():
self._update_global_model()
return True
return False
def should_update_global_model(self) -> bool:
# pyre-fixme[16]: `FedBuffAggregator` has no attribute `cfg`.
return self.num_clients_reported >= self.cfg.buffer_size
def on_training_epoch_end(self) -> bool:
"""
Updates the global model when there are remaining clients
whose updates have not yet been aggregated
into the global model at the end of an epoch.
Return value:
True if there were any such clients with pending updates. In this case,
the global model was updated.
False if there were no such clients. The global model was not updated.
"""
if self.num_clients_reported != 0:
self._update_global_model()
return True
return False
def _update_global_model(self):
total_delta, _ = self.reducer.reduce()
FLModelParamUtils.set_gradient(
model=self._global_model.fl_get_module(), reference_gradient=total_delta
)
self._step_with_modified_lr(lr_normalizer=1.0)
self.num_clients_reported = 0
def _collect_client_update(self, update: IFLModel, weight: float) -> None:
"""
Collects the update from one client and aggregates it internally.
reduced model = reduced model + update * weight
"""
self.reducer.collect_update(delta=update, weight=weight)
self.num_clients_reported += 1
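# Illustrative sketch (not part of the original FLSim source): the buffering control flow
# implemented by FedBuffAggregator above, reduced to scalar arithmetic. The function name,
# the list-based "buffer" and the use of sum() as the reduction are hypothetical
# simplifications of the reducer.
def _example_fedbuff_rounds(client_deltas, buffer_size):
    buffered, global_steps = [], []
    for delta in client_deltas:
        buffered.append(delta)                  # _collect_client_update
        if len(buffered) >= buffer_size:        # should_update_global_model
            global_steps.append(sum(buffered))  # _update_global_model on the reduced delta
            buffered = []
    if buffered:                                # on_training_epoch_end flushes stragglers
        global_steps.append(sum(buffered))
    return global_steps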
class FedAvgWithLRFedBuffAggregator(FedBuffAggregator):
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IdentityChannel] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__,
config_class=FedAvgWithLRFedBuffAggregatorConfig,
**kwargs,
)
super().__init__(global_model=global_model, channel=channel, **kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
class FedAdamFedBuffAggregator(FedBuffAggregator):
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IdentityChannel] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__,
config_class=FedAdamFedBuffAggregatorConfig,
**kwargs,
)
super().__init__(global_model=global_model, channel=channel, **kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def create_optimizer_for_async_aggregator(config: AsyncAggregatorConfig, model: Model):
if config._target_ in {
FedAvgWithLRAsyncAggregatorConfig._target_,
FedAvgWithLRFedBuffAggregatorConfig._target_,
FedAvgWithLRWithMomentumAsyncAggregatorConfig._target_,
}:
return torch.optim.SGD(
model.parameters(),
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `lr`.
lr=config.lr,
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `momentum`.
momentum=config.momentum,
)
elif config._target_ in {
FedAdamAsyncAggregatorConfig._target_,
FedAdamFedBuffAggregatorConfig._target_,
}:
return torch.optim.Adam(
model.parameters(),
lr=config.lr,
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `weight_decay`.
weight_decay=config.weight_decay,
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `beta1`.
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `beta2`.
betas=(config.beta1, config.beta2),
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `eps`.
eps=config.eps,
)
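# Illustrative sketch (not part of the original FLSim source): how the factory above maps
# a config to a torch optimizer. `_example_create_optimizer` and the Linear module are
# hypothetical; the config values are examples only.
def _example_create_optimizer() -> None:
    example_model = torch.nn.Linear(4, 1)  # stand-in for global_model.fl_get_module()
    example_cfg = FedAvgWithLRAsyncAggregatorConfig(lr=0.1, momentum=0.9)
    optimizer = create_optimizer_for_async_aggregator(example_cfg, example_model)
    assert isinstance(optimizer, torch.optim.SGD)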
@dataclass
class AsyncAggregatorConfig:
_target_: str = MISSING
_recursive_: bool = False
aggregation_type: AsyncAggregationType = AsyncAggregationType.fed_buff_aggregation
# reducer to collect client updates, there is no concept of a round
# in async, hence round reducer is not tied to a round
reducer: RoundReducerConfig = RoundReducerConfig(
reduction_type=ReductionType.WEIGHTED_SUM
)
num_users_per_round: int = 1
total_number_of_users: int = 10000000000
@dataclass
class FedAvgWithLRAsyncAggregatorConfig(AsyncAggregatorConfig):
_target_: str = fullclassname(FedAvgWithLRAsyncAggregator)
lr: float = 0.001
momentum: float = 0.0
@dataclass
class FedAvgWithLRWithMomentumAsyncAggregatorConfig(FedAvgWithLRAsyncAggregatorConfig):
_target_: str = fullclassname(FedAvgWithLRWithMomentumAsyncAggregator)
@dataclass
class FedAdamAsyncAggregatorConfig(AsyncAggregatorConfig):
_target_: str = fullclassname(FedAdamAsyncAggregator)
lr: float = 0.001
weight_decay: float = 0.00001
beta1: float = 0.9
beta2: float = 0.999
eps: float = 1e-8
@dataclass
class FedBuffAggregatorConfig(AsyncAggregatorConfig):
# number of clients to collect before taking a global step
buffer_size: int = 1
@dataclass
class FedAvgWithLRFedBuffAggregatorConfig(FedBuffAggregatorConfig):
_target_: str = fullclassname(FedAvgWithLRFedBuffAggregator)
lr: float = 0.001
momentum: float = 0.0
@dataclass
class FedAdamFedBuffAggregatorConfig(FedBuffAggregatorConfig):
_target_: str = fullclassname(FedAdamFedBuffAggregator)
lr: float = 0.001
weight_decay: float = 0.00001
beta1: float = 0.9
beta2: float = 0.999
eps: float = 1e-8
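# Illustrative usage sketch (not part of the original FLSim source), assuming
# `global_model`, `client_delta` and `client_after_train` are IFLModel instances
# prepared elsewhere:
#
#     aggregator = instantiate(
#         FedAvgWithLRAsyncAggregatorConfig(lr=1.0), global_model=global_model
#     )
#     aggregator.zero_grad()
#     aggregator.on_client_training_end(
#         client_delta=client_delta, final_local_model=client_after_train, weight=1.0
#     )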
| canife-main | FLSim/flsim/optimizers/async_aggregators.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import copy
from dataclasses import dataclass
from typing import Any, Optional
from flsim.interfaces.batch_metrics import IFLBatchMetrics
from flsim.interfaces.model import IFLModel
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.fl.common import FLModelParamUtils
from omegaconf import MISSING
from torch.optim.optimizer import Optimizer
class OptimizerScheduler(abc.ABC):
"""
Base class for local LR schedulers; adjusts the learning rate
of local optimizers for individual users during local training
"""
def __init__(
self,
*,
optimizer: Optimizer,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=OptimizerSchedulerConfig,
**kwargs,
)
self.optimizer = optimizer
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@abc.abstractmethod
def step(
self,
batch_metric: Optional[IFLBatchMetrics] = None,
model: Optional[IFLModel] = None,
data: Optional[Any] = None,
epoch: Optional[int] = None,
):
"""
Interface for updating the learning rate. Some learning rate scheduling methods
rely on multiple trial forward passes internally, e.g., line-search
methods, hence a model is required in the interface.
"""
pass
def get_lr(self):
lrs = [param_group["lr"] for param_group in self.optimizer.param_groups]
return lrs
class ConstantLRScheduler(OptimizerScheduler):
def __init__(
self,
*,
optimizer: Optimizer,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__,
config_class=ConstantLRSchedulerConfig,
**kwargs,
)
super().__init__(optimizer=optimizer, **kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def step(
self,
batch_metric: Optional[IFLBatchMetrics] = None,
model: Optional[IFLModel] = None,
data: Optional[Any] = None,
epoch: Optional[int] = None,
):
pass
@property
def lr(self):
return self.cfg.base_lr
class LRBatchSizeNormalizer(OptimizerScheduler):
"""
Normalizes the LR by the number of examples in the batch
"""
def __init__(
self,
*,
optimizer: Optimizer,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__,
config_class=LRBatchSizeNormalizerSchedulerConfig,
**kwargs,
)
super().__init__(optimizer=optimizer, **kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def step(
self,
batch_metric: Optional[IFLBatchMetrics],
model: Optional[IFLModel] = None,
data: Optional[Any] = None,
epoch: Optional[int] = None,
):
assert (
batch_metric is not None
), "`batch_metric` param cannot be None for LRBatchSizeNormalizer"
lr_normalizer = self._get_lr_normalizer(batch_metric)
for param_group in self.optimizer.param_groups:
# pyre-fixme[16]: `LRBatchSizeNormalizer` has no attribute `cfg`.
param_group["lr"] = self.cfg.base_lr * lr_normalizer
def _get_lr_normalizer(self, batch_metric: IFLBatchMetrics):
# pyre-fixme[16]: `LRBatchSizeNormalizer` has no attribute `cfg`.
return batch_metric.num_examples / self.cfg.local_lr_normalizer
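# Illustrative sketch (not part of the original FLSim source): the effective learning rate
# produced by LRBatchSizeNormalizer above. The numbers are hypothetical: with base_lr=0.1,
# local_lr_normalizer=32 and a batch of 8 examples, the applied lr is 0.1 * 8 / 32 = 0.025.
def _example_normalized_lr(base_lr: float, num_examples: int, local_lr_normalizer: int) -> float:
    return base_lr * (num_examples / local_lr_normalizer)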
class ArmijoLineSearch(OptimizerScheduler):
"""
Classical Armijo line-search for step-size selection in optimization.
Recent work suggests that it might also be used in the stochastic over-parameterized
setting, cf.
"Painless Stochastic Gradient: Interpolation, Line-Search, and Convergence Rates"
"""
def __init__(
self,
*,
optimizer: Optimizer,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__,
config_class=ArmijoLineSearchSchedulerConfig,
**kwargs,
)
super().__init__(optimizer=optimizer, **kwargs)
assert (
0
# pyre-fixme[16]: `ArmijoLineSearch` has no attribute `cfg`.
< self.cfg.shrinking_factor
<= 1.0
), "shrinking_factor must be between 0 and 1.0"
assert 0 < self.cfg.c <= 1.0, "constant c must be between 0 and 1.0"
assert (
self.cfg.max_iter > 0
), "number of line-search iterations must be a non-negative integer"
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def step(
self,
batch_metric: Optional[IFLBatchMetrics],
model: Optional[IFLModel],
data: Optional[Any] = None,
epoch: Optional[int] = None,
):
assert (
batch_metric is not None
), "`batch_metric` param cannot be None for ArmijoLineSearch"
assert model is not None, "`model` param cannot be None for ArmijoLineSearch"
state_dict = copy.deepcopy(
FLModelParamUtils.get_state_dict(
model.fl_get_module(), only_federated_params=False
)
)
grad_norm_before_update = FLModelParamUtils.get_gradient_l2_norm_raw(
model.fl_get_module()
)
loss_before_update = batch_metric.loss.item()
# pyre-fixme[16]: `ArmijoLineSearch` has no attribute `cfg`.
if self.cfg.reset:
self._reset_lr()
for _ in range(self.cfg.max_iter):
FLModelParamUtils.load_state_dict(
model.fl_get_module(), state_dict, only_federated_params=False
)
proposed_lr = self.get_lr()
assert (
len(proposed_lr) == 1
), "Armijo line-search only works with single param_group"
# pyre-ignore[20]
self.optimizer.step()
# DO NOT compute backprop after the forward pass here; only the forward is
# required for step-size selection, using the existing gradient direction
new_batch_metrics = model.fl_forward(data)
# loss if we use the proposed LR
new_loss = new_batch_metrics.loss.item()
if (
float(new_loss)
<= loss_before_update
- self.cfg.c * proposed_lr[0] * grad_norm_before_update**2
):
# satisfy sufficient descent, accept proposed_lr
# and terminate line search
break
# reduce lr
self._shrink_lr()
# restore the model state from before the line-search scratch work; the actual
# optimizer.step() is done outside of the scheduler
FLModelParamUtils.load_state_dict(
model.fl_get_module(), state_dict, only_federated_params=False
)
def _shrink_lr(self):
for param_group in self.optimizer.param_groups:
param_group["lr"] *= self.cfg.shrinking_factor
def _reset_lr(self):
# reset LR back to base_lr; used for resetting LR across training batches
for param_group in self.optimizer.param_groups:
param_group["lr"] = self.cfg.base_lr
@dataclass
class OptimizerSchedulerConfig:
_target_: str = MISSING
_recursive_: bool = False
base_lr: float = 0.001
@dataclass
class ConstantLRSchedulerConfig(OptimizerSchedulerConfig):
_target_: str = fullclassname(ConstantLRScheduler)
@dataclass
class LRBatchSizeNormalizerSchedulerConfig(OptimizerSchedulerConfig):
_target_: str = fullclassname(LRBatchSizeNormalizer)
local_lr_normalizer: int = 1
@dataclass
class ArmijoLineSearchSchedulerConfig(OptimizerSchedulerConfig):
_target_: str = fullclassname(ArmijoLineSearch)
# between (0, 1), algorithm parameter, no need to sweep usually
shrinking_factor: float = 0.5
# between (0, 1), algorithm parameter, no need to sweep usually
c: float = 0.5
# whether to reset the learning rate to base_lr in between steps
# if False, line search for next optimizer.step() will continue
# from the step-size found in the previous step
reset: bool = False
# maximum number of line-search iterations
max_iter: int = 5
| canife-main | FLSim/flsim/optimizers/optimizer_scheduler.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import logging
from dataclasses import dataclass
from typing import Optional
import torch
from flsim.channels.base_channel import IdentityChannel, IFLChannel
from flsim.common.logger import Logger
from flsim.interfaces.model import IFLModel
from flsim.optimizers.layerwise_optimizers import LAMB, LARS
from flsim.reducers.base_round_reducer import (
IFLRoundReducer,
IFLRoundReducerConfig,
RoundReducerConfig,
)
from flsim.reducers.dp_round_reducer import DPRoundReducerConfig
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.fl.common import FLModelParamUtils
from hydra.utils import instantiate
from omegaconf import MISSING, DictConfig, OmegaConf
from torch.nn import Module as Model # @manual
class SyncAggregator(abc.ABC):
"""
FL global optimizer for trainers with locally aggregated models
"""
logger: logging.Logger = Logger.get_logger("SyncAggregator")
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IFLChannel] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=SyncAggregatorConfig,
**kwargs,
)
assert (
not self.is_round_reducer_dp
), "To create a private round reducer, use PrivateSyncTrainer instead."
self.reducer = instantiate(
# pyre-fixme[16]: `SyncAggregator` has no attribute `cfg`.
self.cfg.reducer,
global_model=global_model,
channel=channel,
num_users_per_round=self.cfg.num_users_per_round,
total_number_of_users=self.cfg.total_number_of_users,
)
self._global_model: IFLModel = global_model
@classmethod
def _set_defaults_in_cfg(cls, cfg):
if OmegaConf.is_missing(cfg.reducer, "_target_"):
cfg.reducer = RoundReducerConfig()
@property
def is_round_reducer_dp(self):
# reducer can be a DictConfig (if constructed the normal way via constructor)
# or a dataclass instance (if the param is set directly - not a recommended way).
return issubclass(self.cfg.reducer.__class__, DPRoundReducerConfig) or (
isinstance(self.cfg.reducer, DictConfig)
and issubclass(OmegaConf.get_type(self.cfg.reducer), DPRoundReducerConfig)
)
@property
def global_model(self) -> IFLModel:
return self._global_model
def collect_client_update(self, update: IFLModel, weight: float) -> None:
"""
Collects update from one client and aggregates it internally.
"""
self.reducer.collect_update(delta=update, weight=weight)
def init_round(self, reducer: Optional[IFLRoundReducer] = None):
"""
Just like an optimizer that requires zero_grad to be called at the
beginning of each step, the FL aggregator requires this function
to be called at the beginning of each FL round.
"""
if reducer is not None and reducer is not self.reducer:
self.logger.warning("Changing the round reducer!")
del self.reducer
self.reducer = reducer
self.reducer.reset(ref_model=self._global_model)
@abc.abstractmethod
def step(self) -> Optional[float]:
pass
class FedAvgSyncAggregator(SyncAggregator):
"""
Implements federated averaging
"""
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IdentityChannel] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__,
config_class=FedAvgSyncAggregatorConfig,
**kwargs,
)
super().__init__(global_model=global_model, channel=channel, **kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def step(self) -> Optional[float]:
reduced_module, sum_weights = self.reducer.reduce()
if not self.reducer.is_averaged:
raise AttributeError(
"Reduction type of a FedAvg reducer should be either "
"AVERAGE or WEIGHTED_AVERAGE."
)
if sum_weights > 0:
FLModelParamUtils.subtract_model(
minuend=self._global_model.fl_get_module(),
subtrahend=reduced_module,
difference=self._global_model.fl_get_module(),
only_federated_params=True,
)
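# Illustrative sketch (not part of the original FLSim source): with deltas defined as
# (global_model - client_model_after_training) and an averaging reducer, the update above
# is plain federated averaging. The helper below is a hypothetical scalar restatement.
def _example_fedavg_step(global_param: float, client_params_after_training) -> float:
    deltas = [global_param - p for p in client_params_after_training]
    avg_delta = sum(deltas) / len(deltas)
    # subtracting the averaged delta recovers the average of the client models
    return global_param - avg_delta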
class SyncAggregatorWithOptimizer(SyncAggregator):
"""
Base class for SyncAggregators that use a PyTorch optimizer underneath,
like FedAvgWithLR and FedAdam
"""
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IdentityChannel] = None,
**kwargs,
) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=SyncAggregatorConfig,
**kwargs,
)
super().__init__(global_model=global_model, channel=channel, **kwargs)
self.optimizer = create_optimizer_for_sync_aggregator(
# pyre-fixme[16]: `SyncAggregatorWithOptimizer` has no attribute `cfg`.
self.cfg,
global_model.fl_get_module(),
)
# creating a temporary model so we don't have to initialize it
# every time step() is called
self._reconstructed_grad = FLModelParamUtils.clone(self._global_model)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def init_round(self, reducer: Optional[IFLRoundReducer] = None):
super().init_round(reducer)
self.optimizer.zero_grad()
def step(self) -> Optional[float]:
"""
computes grad and takes an optimizer step:
grad := (global_model - new_model)
new_model := global_model - f(grad)
f() depends on the optimizer:
e.g. Adam uses first and second moments, LARS and LAMB
normalize the gradient, etc.
"""
reduced_module, sum_weights = self.reducer.reduce()
if sum_weights > 0:
FLModelParamUtils.set_gradient(
model=self._global_model.fl_get_module(),
reference_gradient=reduced_module,
)
self.optimizer.step()
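# Illustrative sketch (not part of the original FLSim source): with a plain-SGD optimizer
# the step() above reduces to the scalar update below; Adam/LARS/LAMB replace `lr * grad`
# with their own transform f(grad). The helper name is hypothetical.
def _example_sync_sgd_step(global_param: float, reduced_delta: float, lr: float) -> float:
    grad = reduced_delta  # the reducer output is used directly as the gradient
    return global_param - lr * grad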
class FedAvgWithLRSyncAggregator(SyncAggregatorWithOptimizer):
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IdentityChannel] = None,
**kwargs,
) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=FedAvgWithLRSyncAggregatorConfig,
**kwargs,
)
super().__init__(global_model=global_model, channel=channel, **kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
class FedAdamSyncAggregator(SyncAggregatorWithOptimizer):
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IdentityChannel] = None,
**kwargs,
) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=FedAdamSyncAggregatorConfig,
**kwargs,
)
super().__init__(global_model=global_model, channel=channel, **kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
class FedLARSSyncAggregator(SyncAggregatorWithOptimizer):
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IdentityChannel] = None,
**kwargs,
) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=FedLARSSyncAggregatorConfig,
**kwargs,
)
super().__init__(global_model=global_model, channel=channel, **kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
class FedLAMBSyncAggregator(SyncAggregatorWithOptimizer):
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IdentityChannel] = None,
**kwargs,
) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=FedLAMBSyncAggregatorConfig,
**kwargs,
)
super().__init__(global_model=global_model, channel=channel, **kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def create_optimizer_for_sync_aggregator(config: SyncAggregatorConfig, model: Model):
if config._target_ == FedAvgWithLRSyncAggregatorConfig._target_:
return torch.optim.SGD(
model.parameters(),
# pyre-fixme[16]: `SyncAggregatorConfig` has no attribute `lr`.
lr=config.lr,
# pyre-fixme[16]: `SyncAggregatorConfig` has no attribute `momentum`.
momentum=config.momentum,
)
elif config._target_ == FedAdamSyncAggregatorConfig._target_:
return torch.optim.Adam(
model.parameters(),
lr=config.lr,
# pyre-fixme[16]: `SyncAggregatorConfig` has no attribute `weight_decay`.
weight_decay=config.weight_decay,
# pyre-fixme[16]: `SyncAggregatorConfig` has no attribute `beta1`.
# pyre-fixme[16]: `SyncAggregatorConfig` has no attribute `beta2`.
betas=(config.beta1, config.beta2),
# pyre-fixme[16]: `SyncAggregatorConfig` has no attribute `eps`.
eps=config.eps,
)
elif config._target_ == FedLARSSyncAggregatorConfig._target_:
return LARS(
model.parameters(),
lr=config.lr,
# pyre-fixme[16]: `SyncAggregatorConfig` has no attribute `beta`.
beta=config.beta,
weight_decay=config.weight_decay,
)
elif config._target_ == FedLAMBSyncAggregatorConfig._target_:
return LAMB(
model.parameters(),
lr=config.lr,
beta1=config.beta1,
beta2=config.beta2,
weight_decay=config.weight_decay,
eps=config.eps,
)
@dataclass
class SyncAggregatorConfig:
_target_: str = MISSING
_recursive_: bool = False
reducer: IFLRoundReducerConfig = IFLRoundReducerConfig()
num_users_per_round: int = 1
total_number_of_users: int = 10000000000
@dataclass
class FedAvgSyncAggregatorConfig(SyncAggregatorConfig):
_target_: str = fullclassname(FedAvgSyncAggregator)
@dataclass
class FedAvgWithLRSyncAggregatorConfig(SyncAggregatorConfig):
_target_: str = fullclassname(FedAvgWithLRSyncAggregator)
lr: float = 0.001
momentum: float = 0.0
@dataclass
class FedAdamSyncAggregatorConfig(SyncAggregatorConfig):
_target_: str = fullclassname(FedAdamSyncAggregator)
lr: float = 0.001
weight_decay: float = 0.00001
beta1: float = 0.9
beta2: float = 0.999
eps: float = 1e-8
@dataclass
class FedLARSSyncAggregatorConfig(SyncAggregatorConfig):
_target_: str = fullclassname(FedLARSSyncAggregator)
lr: float = 0.001
weight_decay: float = 0.00001
beta: float = 0.9
@dataclass
class FedLAMBSyncAggregatorConfig(SyncAggregatorConfig):
_target_: str = fullclassname(FedLAMBSyncAggregator)
lr: float = 0.001
weight_decay: float = 0.00001
beta1: float = 0.9
beta2: float = 0.999
eps: float = 1e-8
| canife-main | FLSim/flsim/optimizers/sync_aggregators.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
from dataclasses import dataclass
from typing import List
import numpy as np
import pytest
import torch
from flsim.common.pytest_helper import assertEmpty, assertEqual, assertTrue
from flsim.interfaces.model import IFLModel
from flsim.optimizers.async_aggregators import (
AsyncAggregationType,
AsyncAggregatorConfig,
FedAvgWithLRAsyncAggregatorConfig,
FedAvgWithLRFedBuffAggregatorConfig,
FedAvgWithLRWithMomentumAsyncAggregatorConfig,
)
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.test_utils import (
MockQuadratic1DFL,
Quadratic1D,
SampleNet,
TwoFC,
verify_models_equivalent_after_training,
)
from hydra.utils import instantiate
@dataclass
class MockClientModel:
delta: SampleNet
after_train: SampleNet
weight: float
class TestAsyncAggregator:
def _test_one_step(
self,
param_after_local_training: float,
param_after_global_training: float,
weight: float,
config: AsyncAggregatorConfig,
) -> None:
"""
Test async aggregator by:
1. Create AsyncAggregator of given type (fed_buff_aggregation or fed_async_aggregation)
2. Set local param after training to param_after_local_training
3. Set global param (simulate update from another user) to
param_after_global_training
4. Verify that aggregator.on_client_training_end(weight=weight) sets the global param to the correct value
"""
# initial parameters: x=1.0, y=1.0 (x=param, y=constant)
init_val = 1
global_model = MockQuadratic1DFL(Quadratic1D())
async_aggregator = instantiate(config, global_model=global_model)
local_model = FLModelParamUtils.clone(global_model)
delta = FLModelParamUtils.clone(global_model)
# local training, x becomes param_after_local_training
local_model.fl_get_module().x.data = torch.Tensor([param_after_local_training])
# global update from another user, x becomes param_after_global_training
global_model.fl_get_module().x.data = torch.Tensor(
[param_after_global_training]
)
# client delta is init_val - param_after_local_training
delta.fl_get_module().x.data = torch.Tensor(
[init_val - param_after_local_training]
)
async_aggregator.on_client_training_end(
client_delta=delta,
final_local_model=local_model,
weight=weight,
)
if config.aggregation_type == AsyncAggregationType.fed_buff_aggregation:
# model delta = final_local_model - local_model_before_training = param_after_local_training - 1
# global_model = param_after_global_training + weight*(param_after_local_training - 1)
global_model_expected = param_after_global_training + weight * (
param_after_local_training - init_val
)
else:
# global_model = (1- weight)*param_after_global_training + weight*param_after_local_training
global_model_expected = (
1 - weight
) * param_after_global_training + weight * param_after_local_training
assertTrue(
torch.allclose(
global_model.fl_get_module().x.data,
torch.Tensor([global_model_expected]),
atol=1e-7,
)
)
def test_fed_buff_aggregation(self) -> None:
"""
Test that AsyncAggregator with 'fed_buff_aggregation' works as expected
1. Create global_model, some_param=1
2. Copy global_model into local_model
3. Simulate local training by local_model changing some_param to a (some_param=a, delta=a-1)
4. Simulate global model update by another user, some_param being set to b (some_param=b)
5. Call AsyncOptimizer.step(). global_model.some_param should be b + (a-1) (some_param == b + a -1)
"""
num_random_values = 5
num_random_weights = 5
for config in [
FedAvgWithLRAsyncAggregatorConfig(
aggregation_type=AsyncAggregationType.fed_buff_aggregation, lr=1.0
),
FedAvgWithLRFedBuffAggregatorConfig(
aggregation_type=AsyncAggregationType.fed_buff_aggregation,
lr=1.0,
buffer_size=1,
),
]:
for _ in range(num_random_values):
for _ in range(num_random_weights):
self._test_one_step(
param_after_local_training=np.random.random_sample(),
param_after_global_training=np.random.random_sample(),
weight=np.random.random_sample(),
config=config,
)
def test_fed_async_aggregation(self) -> None:
"""
Test that AsyncAggregator with 'fed_async_aggregation' works as expected
1. Create global_model, some_param=1
2. Copy global_model into local_model
3. Simulate local training by local_model changing some_param to a (some_param=a)
4. Simulate global model update by another user, some_param being set to b (some_param=b)
5. Call AsyncOptimizer.step(weight=1). global_model.some_param should be
w*a + (1-w)*b (some_param == w*a + (1-w)*b)
"""
num_random_values = 5
num_random_weights = 5
for _ in range(num_random_values):
for _ in range(num_random_weights):
self._test_one_step(
param_after_local_training=np.random.random_sample(),
param_after_global_training=np.random.random_sample(),
weight=np.random.random_sample(),
config=FedAvgWithLRAsyncAggregatorConfig(
aggregation_type=AsyncAggregationType.fed_async_aggregation,
lr=1.0,
),
)
def _create_n_clients(self, num_clients):
return [
MockClientModel(
delta=SampleNet(TwoFC()),
after_train=SampleNet(TwoFC()),
weight=np.random.random_sample(),
)
for _ in range(num_clients)
]
def _symmetry_test(self, num_users, fedbuff_config) -> str:
fedbuff_global_model_1 = SampleNet(TwoFC())
fedbuff_global_model_2 = FLModelParamUtils.clone(fedbuff_global_model_1)
fedbuff_aggregator_1 = instantiate(
fedbuff_config, global_model=fedbuff_global_model_1
)
fedbuff_aggregator_2 = instantiate(
fedbuff_config, global_model=fedbuff_global_model_2
)
client_models = self._create_n_clients(num_users)
for client_model in client_models:
fedbuff_aggregator_1.zero_grad()
fedbuff_aggregator_1.on_client_training_end(
client_model.delta,
client_model.after_train,
weight=client_model.weight,
)
random.shuffle(client_models)
for client_model in client_models:
fedbuff_aggregator_2.zero_grad()
fedbuff_aggregator_2.on_client_training_end(
client_model.delta,
client_model.after_train,
weight=client_model.weight,
)
return FLModelParamUtils.get_mismatched_param(
models=[
fedbuff_global_model_1.fl_get_module(),
fedbuff_global_model_2.fl_get_module(),
],
rel_epsilon=1e-6,
abs_epsilon=1e-6,
)
def _equivalence_test(self, num_users, fedbuff_config, async_config) -> str:
async_global_model = SampleNet(TwoFC())
fedbuff_global_model = FLModelParamUtils.clone(async_global_model)
async_aggregator = instantiate(async_config, global_model=async_global_model)
fedbuff_aggregator = instantiate(
fedbuff_config, global_model=fedbuff_global_model
)
client_models = self._create_n_clients(num_users)
for client_model in client_models:
async_aggregator.zero_grad()
async_aggregator.on_client_training_end(
client_model.delta,
client_model.after_train,
weight=client_model.weight,
)
for client_model in client_models:
fedbuff_aggregator.zero_grad()
fedbuff_aggregator.on_client_training_end(
client_model.delta,
client_model.after_train,
weight=client_model.weight,
)
return FLModelParamUtils.get_mismatched_param(
models=[
async_global_model.fl_get_module(),
fedbuff_global_model.fl_get_module(),
],
rel_epsilon=1e-6,
abs_epsilon=1e-6,
)
def test_fedbuff_async_symmetry(self) -> None:
"""
Test for symmetry:
To satisfy symmetry, a buffered async aggregation algorithm should be invariant to the order of user updates
f(userA, userB) = f(userB, userA) where f() is aggregation mechanism
1. Create two fedbuff aggregators with the same global model
2. Create a list of N clients
3. Run the first fedbuff aggregator over the clients
4. Shuffle the client list
5. Run the second fedbuff aggregator over the shuffled clients
6. Both should reach the same final global model
"""
num_users = 10
global_lr = 1.0
fedbuff_config = FedAvgWithLRFedBuffAggregatorConfig(
lr=global_lr, buffer_size=1
)
error_msg = self._symmetry_test(
num_users=num_users, fedbuff_config=fedbuff_config
)
assertEmpty(error_msg, msg=error_msg)
def test_fedbuff_async_equivalence(self) -> None:
"""
To satisfy equivalence,
1. Assume both mechanisms have the same starting point
2. Denote N = number of users
3. Assume buffer_size is a factor of N
4. Pure async and fedbuff-async would reach the same final global model
For simplicity, we assume buffer_size = N
"""
num_users = 10
global_lr = 1.00
async_config = FedAvgWithLRAsyncAggregatorConfig(lr=global_lr)
fedbuff_config = FedAvgWithLRFedBuffAggregatorConfig(
lr=global_lr, buffer_size=10
)
error_msg = self._equivalence_test(
num_users=num_users,
fedbuff_config=fedbuff_config,
async_config=async_config,
)
assertEmpty(error_msg, msg=error_msg)
def test_global_update(self) -> None:
"""
Test that the aggregator only updates the global model if
the buffer threshold is reached
"""
num_epochs = 5
for _ in range(num_epochs):
num_total_users = np.random.randint(1, 20)
buffer_size = np.random.randint(1, num_total_users + 1)
fedbuff_config = FedAvgWithLRFedBuffAggregatorConfig(
lr=1.0, buffer_size=buffer_size
)
global_model = SampleNet(TwoFC())
fedbuff_aggregator = instantiate(fedbuff_config, global_model=global_model)
client_models = self._create_n_clients(num_total_users)
for client_num, client in enumerate(client_models):
is_global_model_updated = fedbuff_aggregator.on_client_training_end(
client.delta, client.after_train, weight=1
)
# client_num is 0-indexed, hence the + 1
should_update_global_model = (client_num + 1) % buffer_size == 0
assertEqual(is_global_model_updated, should_update_global_model)
def train_async_with_zero_weight(
self,
initial_model: IFLModel,
client_models: List[MockClientModel],
num_epochs: int,
num_total_users: int,
momentum: float,
train_with_zero_weight_in_middle: bool,
) -> IFLModel:
"""'Train' initial model by applying randomly generated client model updates to
it, by repeatedly calling aggregator.on_client_training_end
We do it in up to three phases:
a) Train for num_epochs/2
b) If train_with_zero_weight_in_middle, train for num_epochs/2 with zero weight
c) Train for num_epochs/2
Return final model
"""
assert num_epochs % 2 == 0, "Training must be over even number of epochs"
# config = AsyncAggregatorFedSGDConfig(lr=1.0, momentum=momentum)
config = FedAvgWithLRWithMomentumAsyncAggregatorConfig(
lr=1.0, momentum=momentum
)
aggregator = instantiate(config, global_model=initial_model)
half_epochs = int(num_epochs / 2)
def print_debug(prefix: str):
for key, value in aggregator.optimizer.state.items():
print(f"{prefix}: {key}:{value}")
break
for _ in range(half_epochs):
for client in client_models:
aggregator.on_client_training_end(
client.delta, client.after_train, weight=1
)
print_debug("After first loop")
if train_with_zero_weight_in_middle:
# training with zero weight should change neither the model, nor
# the velocity computation inside the optimizer
for _ in range(half_epochs):
for client in client_models:
aggregator.on_client_training_end(
client.delta, client.after_train, weight=0
)
print_debug("After second loop")
for _ in range(half_epochs):
for client in client_models:
aggregator.on_client_training_end(
client.delta, client.after_train, weight=1
)
print_debug("After third loop")
return aggregator.global_model
@pytest.mark.parametrize(
"num_total_users,num_epochs, momentum",
[(1, 2, 0.5), (10, 10, 0.5), (10, 10, 0)],
)
def test_momentum_implementation_zero_weight(
self, num_total_users: int, num_epochs: int, momentum: float
) -> None:
"""In FedAsyncAggregatorWithMomentum.on_client_training_end, when weight=0,
neither velocity nor model should be updated
We test this by comparing two training runs:
RUN 1
a) Running a few FL rounds with the momentum async aggregator
b) Calling on_client_training_end with weight=0
c) Running some more FL rounds with the momentum async aggregator
RUN 2
Same as RUN 1, except no (b)
RUN 1 and RUN 2 should produce the same model
"""
# function starts here
initial_model = SampleNet(TwoFC())
client_models = self._create_n_clients(num_total_users)
torch.manual_seed(1)
np.random.seed(1)
global_model_trained1 = self.train_async_with_zero_weight(
initial_model=FLModelParamUtils.clone(initial_model),
client_models=client_models,
num_epochs=num_epochs,
num_total_users=num_total_users,
momentum=momentum,
train_with_zero_weight_in_middle=False,
)
torch.manual_seed(1)
np.random.seed(1)
global_model_trained2 = self.train_async_with_zero_weight(
initial_model=FLModelParamUtils.clone(initial_model),
client_models=client_models,
num_epochs=num_epochs,
num_total_users=num_total_users,
momentum=momentum,
train_with_zero_weight_in_middle=True,
)
error_msg = verify_models_equivalent_after_training(
global_model_trained1,
global_model_trained2,
initial_model,
rel_epsilon=1e-6,
abs_epsilon=1e-6,
)
assertEqual(error_msg, "")
@pytest.mark.parametrize(
"num_total_users,num_epochs, momentum, lr",
[(1, 2, 0.5, 10), (10, 10, 0.5, 10), (10, 10, 0, 10)],
)
def test_momentum_implementation_one_weight(
self, num_total_users: int, num_epochs: int, momentum: float, lr: float
) -> None:
"""FedAsyncAggregatorWithMomentum.on_client_training_end should behave
exactly like SGD with momentum when weight = 1
We test this by
a) Running SGD with momentum
b) Running AsyncFL in sequential model with momentum
Showing that (a) and (b) produce the same results
"""
momentum = 0.5
num_epochs = 10
num_total_users = 10
lr = 1.0
initial_model = SampleNet(TwoFC())
client_models = self._create_n_clients(num_total_users)
# run async training
torch.manual_seed(1)
np.random.seed(1)
config = FedAvgWithLRWithMomentumAsyncAggregatorConfig(lr=lr, momentum=momentum)
aggregator = instantiate(
config, global_model=FLModelParamUtils.clone(initial_model)
)
for _ in range(num_epochs):
for client in client_models:
aggregator.on_client_training_end(
client.delta, client.after_train, weight=1
)
# run SGD training
torch.manual_seed(1)
np.random.seed(1)
sgd_model = FLModelParamUtils.clone(initial_model)
sgd_optimizer = torch.optim.SGD(
sgd_model.fl_get_module().parameters(), lr=lr, momentum=momentum
)
for _ in range(num_epochs):
for client in client_models:
FLModelParamUtils.set_gradient(
model=sgd_model.fl_get_module(),
reference_gradient=client.delta.fl_get_module(),
)
sgd_optimizer.step()
error_msg = verify_models_equivalent_after_training(
aggregator.global_model,
sgd_model,
initial_model,
rel_epsilon=1e-6,
abs_epsilon=1e-6,
)
assertEqual(error_msg, "")
| canife-main | FLSim/flsim/optimizers/tests/test_async_aggregator.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from flsim.common.pytest_helper import assertEqual, assertTrue
from flsim.optimizers.async_aggregators import (
AsyncAggregatorConfig,
FedAdamAsyncAggregatorConfig,
FedAdamFedBuffAggregatorConfig,
FedAvgWithLRAsyncAggregatorConfig,
FedAvgWithLRFedBuffAggregatorConfig,
FedBuffAggregatorConfig,
)
from flsim.optimizers.local_optimizers import LocalOptimizerSGDConfig
from flsim.optimizers.sync_aggregators import (
FedAdamSyncAggregatorConfig,
FedAvgSyncAggregatorConfig,
FedAvgWithLRSyncAggregatorConfig,
SyncAggregatorConfig,
)
from flsim.optimizers.tests.test_optimizer_utils import OptimizerTestUtil
from flsim.utils.sample_model import TwoLayerNet
from hydra.utils import instantiate
from omegaconf import OmegaConf
@pytest.fixture(scope="class")
def prepare_optimizer_factory_test(request) -> None:
request.cls.model = TwoLayerNet(10, 5, 1)
@pytest.mark.usefixtures("prepare_optimizer_factory_test")
class TestOptimizerFactory:
@pytest.mark.parametrize(
"type_str,config", OptimizerTestUtil.provide_sync_factory_creation_dataset()
)
def test_sync_optimizer_config_creation_through_hydra(
self, type_str: str, config: SyncAggregatorConfig
) -> None:
if type_str == "FedAvg":
assertTrue(
# pyre-fixme[16]: Optional type has no attribute `__name__`.
OmegaConf.get_type(config).__name__,
FedAvgSyncAggregatorConfig.__name__,
)
if type_str == "FedAvgWithLR":
assertTrue(
OmegaConf.get_type(config).__name__,
FedAvgWithLRSyncAggregatorConfig.__name__,
)
# pyre-fixme[16]: `SyncAggregatorConfig` has no attribute `lr`.
assertEqual(config.lr, 0.1)
if type_str == "FedAdam":
assertTrue(
OmegaConf.get_type(config).__name__,
FedAdamSyncAggregatorConfig.__name__,
)
assertEqual(config.lr, 0.1)
# pyre-fixme[16]: `SyncAggregatorConfig` has no attribute `weight_decay`.
assertEqual(config.weight_decay, 0.9)
# pyre-fixme[16]: `SyncAggregatorConfig` has no attribute `eps`.
assertEqual(config.eps, 1e-8)
@pytest.mark.parametrize(
"type_str,config", OptimizerTestUtil.provide_async_factory_creation_dataset()
)
def test_async_optimizer_config_creation_through_hydra(
self, type_str: str, config: AsyncAggregatorConfig
) -> None:
if type_str == "FedAvgWithLR":
assertTrue(
# pyre-fixme[16]: Optional type has no attribute `__name__`.
OmegaConf.get_type(config).__name__,
FedAvgWithLRAsyncAggregatorConfig.__name__,
)
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `lr`.
assertEqual(config.lr, 0.1)
if type_str == "FedAdam":
assertTrue(
OmegaConf.get_type(config).__name__,
FedAdamAsyncAggregatorConfig.__name__,
)
assertEqual(config.lr, 0.1)
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `weight_decay`.
assertEqual(config.weight_decay, 0.9)
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `eps`.
assertEqual(config.eps, 1e-8)
@pytest.mark.parametrize(
"type_str,config", OptimizerTestUtil.provide_hybrid_factory_creation_dataset()
)
def test_hybrid_optimizer_config_creation_through_hydra(
self, type_str: str, config: FedBuffAggregatorConfig
) -> None:
if type_str == "FedAvgWithLR":
assertTrue(
# pyre-fixme[16]: Optional type has no attribute `__name__`.
OmegaConf.get_type(config).__name__,
FedAvgWithLRFedBuffAggregatorConfig.__name__,
)
# pyre-fixme[16]: `FedBuffAggregatorConfig` has no attribute `lr`.
assertEqual(config.lr, 0.1)
assertEqual(config.buffer_size, 3)
if type_str == "FedAdam":
assertTrue(
OmegaConf.get_type(config).__name__,
FedAdamFedBuffAggregatorConfig.__name__,
)
assertEqual(config.lr, 0.1)
# pyre-fixme[16]: `FedBuffAggregatorConfig` has no attribute `weight_decay`.
assertEqual(config.weight_decay, 0.9)
# pyre-fixme[16]: `FedBuffAggregatorConfig` has no attribute `eps`.
assertEqual(config.eps, 1e-8)
assertEqual(config.buffer_size, 3)
def test_local_optimizer_creation(self) -> None:
config = {
"_target_": LocalOptimizerSGDConfig._target_,
"lr": 1.0,
"weight_decay": 0.1,
}
# pyre-ignore[16]: for pytest fixture
local_optimizer = instantiate(config, model=self.model)
assertEqual(
OptimizerTestUtil.get_value_from_optimizer(local_optimizer, "lr"),
config["lr"],
)
assertEqual(
OptimizerTestUtil.get_value_from_optimizer(local_optimizer, "weight_decay"),
config["weight_decay"],
)
| canife-main | FLSim/flsim/optimizers/tests/test_optimizer_factories.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/optimizers/tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import pytest
from flsim.common.pytest_helper import assertEqual
from flsim.optimizers.sync_aggregators import (
FedAdamSyncAggregator,
FedAdamSyncAggregatorConfig,
FedLAMBSyncAggregator,
FedLAMBSyncAggregatorConfig,
)
from flsim.utils.test_utils import MockQuadratic1DFL, Quadratic1D
from omegaconf import OmegaConf
def adjust_learning_rate(optimizer, new_lr) -> None:
for param_group in optimizer.param_groups:
param_group["lr"] = new_lr
def _test_lamb_multiple_steps(test_case, weight_decay: int = 0) -> None:
"""
a toy optimization example:
min f(x) = 100 x^2 - 1
the minimum is at x=0.0; x is initialized at 1.0.
"""
# set up quadratic parabola objective and optimizer
beta1 = 0.9
beta2 = 0.999
eps = 1e-8
dict_config_lamb = {
"lr": 0.01,
"beta1": beta1,
"beta2": beta2,
"eps": eps,
"weight_decay": weight_decay,
}
lamb_aggregator = FedLAMBSyncAggregator(
# pyre-fixme[6]: Expected `bool` for 1st param but got `float`.
# pyre-fixme[6]: Expected `IFLRoundReducerConfig` for 1st param but got `float`.
# pyre-fixme[6]: Expected `int` for 1st param but got `float`.
# pyre-fixme[6]: Expected `str` for 1st param but got `float`.
**OmegaConf.structured(FedLAMBSyncAggregatorConfig(**dict_config_lamb)),
global_model=test_case.quadratic1D_lamb,
)
dict_config_adam = {"lr": 0.01, "weight_decay": weight_decay, "eps": eps}
adam_aggregator = FedAdamSyncAggregator(
# pyre-fixme[6]: Expected `bool` for 1st param but got `float`.
# pyre-fixme[6]: Expected `IFLRoundReducerConfig` for 1st param but got `float`.
# pyre-fixme[6]: Expected `int` for 1st param but got `float`.
# pyre-fixme[6]: Expected `str` for 1st param but got `float`.
**OmegaConf.structured(FedAdamSyncAggregatorConfig(**dict_config_adam)),
global_model=test_case.quadratic1D_adam,
)
m_t = 0.0
v_t = 0.0
for i in range(1, 11):
lamb_aggregator.optimizer.zero_grad()
metrics = test_case.quadratic1D_lamb.fl_forward()
loss_lamb = metrics.loss
loss_lamb.backward()
original_param_value = (
test_case.quadratic1D_lamb.fl_get_module().state_dict()["x"].item()
)
weight_norm = abs(original_param_value)
lamb_aggregator.optimizer.step()
updated_param_value_lamb = (
test_case.quadratic1D_lamb.fl_get_module().state_dict()["x"].item()
)
g_t = list(test_case.quadratic1D_lamb.fl_get_module().parameters())[
0
].grad.data.item()
bias_correction1 = 1.0 - beta1**i
bias_correction2 = 1.0 - beta2**i
m_t = beta1 * m_t + (1.0 - beta1) * g_t
v_t = beta2 * v_t + (1.0 - beta2) * g_t**2
lamb_update_unnormalized = (m_t / bias_correction1) / (
math.sqrt(v_t / bias_correction2) + eps
)
lamb_update_norm = abs(lamb_update_unnormalized)
equivalent_adam_lr = dict_config_lamb["lr"] * weight_norm / lamb_update_norm
# we can't initialize a new adam each time because it would lose its momentum
adjust_learning_rate(adam_aggregator.optimizer, equivalent_adam_lr)
adam_aggregator.optimizer.zero_grad()
metrics = test_case.quadratic1D_adam.fl_forward()
loss_adam = metrics.loss
loss_adam.backward()
original_param_value = (
test_case.quadratic1D_adam.fl_get_module().state_dict()["x"].item()
)
adam_aggregator.optimizer.step()
updated_param_value_adam = (
test_case.quadratic1D_adam.fl_get_module().state_dict()["x"].item()
)
assertEqual(updated_param_value_lamb, updated_param_value_adam)
@pytest.fixture(scope="class")
def prepare_lamb_optimizer_test(request) -> None:
request.cls.quadratic1D_lamb = MockQuadratic1DFL(Quadratic1D())
request.cls.quadratic1D_adam = MockQuadratic1DFL(Quadratic1D())
@pytest.mark.usefixtures("prepare_lamb_optimizer_test")
class TestLambOptimizer:
def test_lamb_no_weight_decay(self) -> None:
# pyre-fixme[6]: Expected `int` for 2nd param but got `float`.
_test_lamb_multiple_steps(self, weight_decay=0.0)
def test_lamb_weight_decay(self) -> None:
# pyre-fixme[6]: Expected `int` for 2nd param but got `float`.
_test_lamb_multiple_steps(self, weight_decay=0.1)
| canife-main | FLSim/flsim/optimizers/tests/test_lamb_optimizer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from flsim.common.pytest_helper import assertAlmostEqual, assertEqual
from flsim.optimizers.sync_aggregators import (
FedAvgWithLRSyncAggregator,
FedAvgWithLRSyncAggregatorConfig,
FedLARSSyncAggregator,
FedLARSSyncAggregatorConfig,
)
from flsim.utils.test_utils import MockQuadratic1DFL, Quadratic1D
from omegaconf import OmegaConf
@pytest.fixture(scope="class")
def prepare_lars_optimizer_test(request) -> None:
request.cls.quadratic1D_lars = MockQuadratic1DFL(Quadratic1D())
request.cls.quadratic1D_sgd = MockQuadratic1DFL(Quadratic1D())
@pytest.mark.usefixtures("prepare_lars_optimizer_test")
class TestLarsOptimizer:
def test_lars_multiple_steps(self) -> None:
"""
a toy optimization example:
min f(x) = 100 x^2 - 1
the minimum is at x=0.0; x is initialized at 1.0.
"""
# set up quadratic parabola objective and optimizer
dict_config_lars = {"lr": 0.01, "weight_decay": 0}
lars_aggregator = FedLARSSyncAggregator(
# pyre-fixme[6]: Expected `bool` for 1st param but got `float`.
# pyre-fixme[6]: Expected `IFLRoundReducerConfig` for 1st param but got
# `float`.
# pyre-fixme[6]: Expected `int` for 1st param but got `float`.
# pyre-fixme[6]: Expected `str` for 1st param but got `float`.
**OmegaConf.structured(FedLARSSyncAggregatorConfig(**dict_config_lars)),
# pyre-fixme[16]: `TestLarsOptimizer` has no attribute `quadratic1D_lars`.
global_model=self.quadratic1D_lars,
)
for i in range(10):
lars_aggregator.optimizer.zero_grad()
metrics = self.quadratic1D_lars.fl_forward()
loss_lars = metrics.loss
loss_lars.backward()
original_param_value = (
self.quadratic1D_lars.fl_get_module().state_dict()["x"].item()
)
weight_norm = abs(original_param_value)
lars_aggregator.optimizer.step()
updated_param_value_lars = (
self.quadratic1D_lars.fl_get_module().state_dict()["x"].item()
)
lars_gradient = list(self.quadratic1D_lars.fl_get_module().parameters())[
0
].grad.data.item()
lars_gradient_norm = abs(lars_gradient)
if i == 0:
assertAlmostEqual(
abs((original_param_value - updated_param_value_lars)),
0.01,
delta=1e-4,
)
assertAlmostEqual(lars_gradient_norm, 200, delta=1e-4)
equivalent_sgd_lr = (
dict_config_lars["lr"] * weight_norm / lars_gradient_norm
)
dict_config_sgd = {"lr": equivalent_sgd_lr}
sgd_aggregator = FedAvgWithLRSyncAggregator(
**OmegaConf.structured(
# pyre-fixme[6]: Expected `bool` for 1st param but got `float`.
# pyre-fixme[6]: Expected `IFLRoundReducerConfig` for 1st param
# but got `float`.
# pyre-fixme[6]: Expected `int` for 1st param but got `float`.
# pyre-fixme[6]: Expected `str` for 1st param but got `float`.
FedAvgWithLRSyncAggregatorConfig(**dict_config_sgd)
),
# pyre-fixme[16]: `TestLarsOptimizer` has no attribute
# `quadratic1D_sgd`.
global_model=self.quadratic1D_sgd,
)
sgd_aggregator.optimizer.zero_grad()
metrics = self.quadratic1D_sgd.fl_forward()
loss_sgd = metrics.loss
loss_sgd.backward()
original_param_value = (
self.quadratic1D_sgd.fl_get_module().state_dict()["x"].item()
)
sgd_aggregator.optimizer.step()
updated_param_value_sgd = (
self.quadratic1D_sgd.fl_get_module().state_dict()["x"].item()
)
assertEqual(updated_param_value_lars, updated_param_value_sgd)
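# Worked first-step check (a sketch, not part of the original test) for the assertions
# above: with f(x) = 100 x^2 - 1, x0 = 1.0 and lr = 0.01, the gradient is 200 * x0 = 200,
# the weight norm is 1, and the LARS step is lr * |w| / |g| * g = 0.01 * (1 / 200) * 200 = 0.01,
# matching the asserted parameter change of ~0.01 and gradient norm of ~200.
def _lars_first_step_sketch(x0=1.0, lr=0.01):
    grad = 200.0 * x0                       # d/dx (100 x^2 - 1)
    step = lr * abs(x0) / abs(grad) * grad  # layer-wise trust ratio times the raw gradient
    return x0 - step                        # 1.0 - 0.01 = 0.99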
| canife-main | FLSim/flsim/optimizers/tests/test_lars_optimizer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from flsim.common.pytest_helper import assertEqual, assertTrue
from flsim.optimizers.optimizer_scheduler import (
ArmijoLineSearch,
ArmijoLineSearchSchedulerConfig,
)
from flsim.utils.test_utils import MockQuadratic1DFL, Quadratic1D
from omegaconf import OmegaConf
class TestOptimizerScheduler:
def test_armijo_line_search_on_parabola(self) -> None:
"""
a toy optimization example:
min f(x) = 100 x^2 - 1
        the minimum is at x = 0.0; x is initialized at 1.0.
        Gradient descent with constant step-size 0.01 will never
        converge; instead it alternates between -1 and +1.
In contrast, Armijo line-search reduces step-sizes to avoid
"over-shooting" and converges to 0.
"""
# set up quadratic parabola objective and optimizer
quadratic1D = MockQuadratic1DFL(Quadratic1D())
optimizer = torch.optim.SGD(
quadratic1D.fl_get_module().parameters(), lr=0.01, momentum=0.0
)
        # run (deterministic) GD for 10 steps with constant step-size 0.01;
        # with x0 = 1.0 the iterates never converge and instead alternate
        # between -1.0 and 1.0
for i in range(10):
optimizer.zero_grad()
metrics = quadratic1D.fl_forward()
quadratic_func_val = metrics.loss
quadratic_func_val.backward()
optimizer.step()
obj_val = quadratic1D.fl_get_module().state_dict()["x"].item()
assertEqual(obj_val, (-1.0) ** (i + 1))
# set up (again) quadratic parabola objective and optimizer
quadratic1D = MockQuadratic1DFL(Quadratic1D())
optimizer = torch.optim.SGD(
quadratic1D.fl_get_module().parameters(), lr=0.01, momentum=0.0
)
# use Armijo line-search for optimizer step-size selection
# same initial step-size
config = ArmijoLineSearchSchedulerConfig()
config.base_lr = 0.01
config.reset = True
armijo_line_search_scheduler = ArmijoLineSearch(
optimizer=optimizer, **OmegaConf.structured(config)
)
# run for 10 steps
for t in range(10):
optimizer.zero_grad()
metrics = quadratic1D.fl_forward()
quadratic_func_val = metrics.loss
quadratic_func_val.backward()
armijo_line_search_scheduler.step(metrics, quadratic1D, None, t)
optimizer.step()
# check converging to 0 (true answer)
assertTrue(quadratic1D.fl_get_module().state_dict()["x"].item() <= 1e-7)
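# Why constant-step GD oscillates here (an illustrative note, not part of the original
# test): f(x) = 100 x^2 - 1 has gradient 200 x, so a fixed step-size of 0.01 gives
# x_{t+1} = x_t - 0.01 * 200 * x_t = -x_t, i.e. the iterate flips sign every step and
# never converges, which is exactly what the first loop above asserts.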
| canife-main | FLSim/flsim/optimizers/tests/test_optimizer_scheduler.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Union
from flsim.optimizers.async_aggregators import (
AsyncAggregator,
FedAdamAsyncAggregator,
FedAdamAsyncAggregatorConfig,
FedAdamFedBuffAggregatorConfig,
FedAvgWithLRAsyncAggregator,
FedAvgWithLRAsyncAggregatorConfig,
FedAvgWithLRFedBuffAggregatorConfig,
)
from flsim.optimizers.sync_aggregators import (
FedAdamSyncAggregator,
FedAdamSyncAggregatorConfig,
FedAvgSyncAggregator,
FedAvgSyncAggregatorConfig,
FedAvgWithLRSyncAggregator,
FedAvgWithLRSyncAggregatorConfig,
SyncAggregator,
)
from flsim.trainers.async_trainer import AsyncTrainer
from flsim.trainers.sync_trainer import SyncTrainer
from omegaconf import OmegaConf
from torch.optim.optimizer import Optimizer
# flake8: noqa
class OptimizerTestUtil:
SYNC_AGGREGATOR_TEST_CONFIGS = {
"avg": {"dict_config": {"_base_": "base_fed_avg_sync_aggregator"}},
"sgd": {
"dict_config": {
"_base_": "base_fed_avg_with_lr_sync_aggregator",
"lr": 0.1,
"momentum": 0.9,
}
},
"adam": {
"dict_config": {
"_base_": "base_fed_adam_sync_aggregator",
"lr": 0.1,
"weight_decay": 0.9,
"eps": 1e-8,
}
},
}
ASYNC_AGGREGATOR_TEST_CONFIGS = {
"sgd": {
"dict_config": {
"_base_": "base_fed_avg_with_lr_async_aggregator",
"lr": 0.1,
"momentum": 0.9,
}
},
"adam": {
"dict_config": {
"_base_": "base_fed_adam_async_aggregator",
"lr": 0.1,
"weight_decay": 0.9,
"eps": 1e-8,
}
},
}
@classmethod
def provide_sync_factory_creation_dataset(cls):
return [
(
"FedAvg",
OmegaConf.structured(FedAvgSyncAggregatorConfig),
),
(
"FedAvgWithLR",
OmegaConf.structured(
FedAvgWithLRSyncAggregatorConfig(lr=0.1, momentum=0.9)
),
),
(
"FedAdam",
OmegaConf.structured(
FedAdamSyncAggregatorConfig(lr=0.1, weight_decay=0.9, eps=1e-8)
),
),
]
@classmethod
def provide_async_factory_creation_dataset(cls):
return [
(
"FedAvgWithLR",
OmegaConf.structured(
FedAvgWithLRAsyncAggregatorConfig(lr=0.1, momentum=0.9)
),
),
(
"FedAdam",
OmegaConf.structured(
FedAdamAsyncAggregatorConfig(lr=0.1, weight_decay=0.9, eps=1e-8)
),
),
]
@classmethod
def provide_hybrid_factory_creation_dataset(cls):
return [
(
"FedAvgWithLR",
OmegaConf.structured(
FedAvgWithLRFedBuffAggregatorConfig(
lr=0.1, momentum=0.9, buffer_size=3
)
),
),
(
"FedAdam",
OmegaConf.structured(
FedAdamFedBuffAggregatorConfig(
lr=0.1,
weight_decay=0.9,
eps=1e-8,
buffer_size=3,
)
),
),
]
@classmethod
def get_value_from_optimizer(cls, optimizer: Optimizer, attribute_name: str):
for param_group in optimizer.param_groups:
return param_group[attribute_name]
raise TypeError(f"Optimizer does not have attribute: {attribute_name}")
@classmethod
def get_sync_aggregator_test_configs(cls):
return list(cls.SYNC_AGGREGATOR_TEST_CONFIGS.values())
@classmethod
def get_async_aggregator_test_configs(cls):
return list(cls.ASYNC_AGGREGATOR_TEST_CONFIGS.values())
@classmethod
def _verify_trainer_common_aggregators(
cls,
aggregator: Union[SyncAggregator, AsyncAggregator],
dict_config: Dict[str, Any],
):
if "fed_avg_with_lr" in dict_config["_base_"]:
assert isinstance(aggregator, FedAvgWithLRSyncAggregator) or isinstance(
aggregator, FedAvgWithLRAsyncAggregator
)
assert (
OptimizerTestUtil.get_value_from_optimizer(aggregator.optimizer, "lr")
== 0.1
), "lr for FedavgwithLr optimizer should be 0.1"
assert (
OptimizerTestUtil.get_value_from_optimizer(
aggregator.optimizer, "momentum"
)
== 0.9
), "momentum for FedavgwithLr optimizer should be 0.9"
if "fed_adam" in dict_config["_base_"]:
assert isinstance(aggregator, FedAdamSyncAggregator) or isinstance(
aggregator, FedAdamAsyncAggregator
)
assert (
OptimizerTestUtil.get_value_from_optimizer(aggregator.optimizer, "lr")
== 0.1
), "lr for adam optimizer should be 0.1"
assert (
OptimizerTestUtil.get_value_from_optimizer(
aggregator.optimizer, "weight_decay"
)
== 0.9
), "weight_decay for adam optimizer should be 0.9"
assert (
OptimizerTestUtil.get_value_from_optimizer(aggregator.optimizer, "eps")
== 1e-8
), "eps for adam optimizer should be 1e-8"
@classmethod
def verify_async_trainer_aggregator(
cls, trainer: AsyncTrainer, dict_config: Dict[str, Any]
):
aggregator = trainer.aggregator
assert isinstance(
aggregator, AsyncAggregator
), "aggregator should be an instance of AsyncAggregator for AsyncTrainer"
cls._verify_trainer_common_aggregators(aggregator, dict_config)
| canife-main | FLSim/flsim/optimizers/tests/test_optimizer_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import logging
import math
from dataclasses import dataclass
from typing import Any, List, Optional, Tuple
from flsim.clients.async_client import AsyncClientDevice
from flsim.common.logger import Logger
from flsim.common.timeline import Timeline
from flsim.common.training_event_handler import AsyncTrainingEventHandler
from flsim.common.training_simulator import AsyncTrainingSimulator
from flsim.data.data_provider import IFLDataProvider
from flsim.interfaces.metrics_reporter import IFLMetricsReporter, Metric
from flsim.interfaces.model import IFLModel
from flsim.optimizers.async_aggregators import (
AsyncAggregator,
AsyncAggregatorConfig,
FedAvgWithLRAsyncAggregatorConfig,
FedBuffAggregator,
)
from flsim.trainers.trainer_base import FLTrainer, FLTrainerConfig
from flsim.utils.async_trainer.async_user_selector import (
AsyncUserSelectorFactory,
AsyncUserSelectorType,
)
from flsim.utils.async_trainer.async_weights import AsyncWeight, AsyncWeightConfig
from flsim.utils.async_trainer.training_event_generator import (
AsyncTrainingEventGeneratorConfig,
EventGeneratorConfig,
)
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.cuda import CudaTransferMinimizer, GPUMemoryMinimizer
from hydra.utils import instantiate
from omegaconf import OmegaConf
from tqdm import tqdm
class AsyncTrainer(FLTrainer, AsyncTrainingEventHandler):
"""Implements Async version of FederatedAveraging
Implemented like AsynchronousSGD, except that we don't have real gradients,
but we use reconstructed gradients as if they're real gradients
Attributes:
epochs (int): Training epochs
report_train_metrics (bool): Whether metrics on training data should be
computed and reported.
Internal attributes:
"""
logger: logging.Logger = Logger.get_logger(__name__)
def __init__(
self,
*,
model: IFLModel,
cuda_enabled: bool = False,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=AsyncTrainerConfig,
**kwargs,
)
super().__init__(model=model, cuda_enabled=cuda_enabled, **kwargs)
self._cuda_state_manager = (
CudaTransferMinimizer(cuda_enabled)
# pyre-fixme[16]: `AsyncTrainer` has no attribute `cfg`.
if self.cfg.minimize_cuda_transfer
else GPUMemoryMinimizer(cuda_enabled)
)
self._cuda_state_manager.on_trainer_init(model)
self.aggregator: AsyncAggregator = instantiate(
self.cfg.aggregator, global_model=model, channel=self.channel
)
AsyncTrainingEventHandler.__init__(self)
self._training_simulator: Optional[AsyncTrainingSimulator] = None
self.weight: AsyncWeight = instantiate(self.cfg.async_weight)
self._event_generator = instantiate(
self.cfg.training_event_generator,
)
# for pyre; declare instance variables (https://fburl.com/88n6i71r)
# pyre-fixme[8]: Attribute has type `IFLMetricsReporter`; used as `None`.
self.metrics_reporter: "IFLMetricsReporter" = None
self.best_metric = None
self.best_model_state = self.global_model().fl_get_module().state_dict()
@classmethod
def _set_defaults_in_cfg(cls, cfg):
if OmegaConf.is_missing(cfg.aggregator, "_target_"):
cfg.aggregator = FedAvgWithLRAsyncAggregatorConfig()
if OmegaConf.is_missing(cfg.training_event_generator, "_target_"):
cfg.training_event_generator = AsyncTrainingEventGeneratorConfig()
if OmegaConf.is_missing(cfg.async_weight, "_target_"):
cfg.async_weight = AsyncWeightConfig()
def global_model(self) -> IFLModel:
"""This function makes it explicit that self.global_model() is owned
        by the aggregator, not by AsyncTrainer
"""
return self.aggregator.global_model
@property
def global_round(self):
return self.aggregator.global_seqnum + 1 # seqnum is 0 based.
def on_training_start(self, client: AsyncClientDevice) -> None:
r"""
Overrides `AsyncTrainingEventHandler.on_training_start` to mark a client starting to train
Marks the client state as training and copies the current global model
to the client's local model
Note:
-----
The actual training process doesn't start until on_training_end
"""
client.training_started(
model_seqnum=self.aggregator.global_seqnum, init_model=self.global_model()
)
def train_and_update_global_model(self, client: AsyncClientDevice) -> None:
r"""
        Train a single client and aggregate its update into the global model
"""
assert client.local_model is not None
# 1. zero grad global optimizer
self.aggregator.zero_grad()
# 2. train client on local data
client_delta, final_local_model, num_examples = client.train_local_model(
self.metrics_reporter
)
        assert num_examples > 0, "Client must have at least one example"
# 3. get client staleness
client_staleness = self.aggregator.model_staleness(
model_seqnum=client.model_seqnum
)
# 4. check participation criteria
if self._can_participate(staleness=client_staleness):
weight = self.weight.weight(
num_examples=num_examples, staleness=client_staleness
)
# 5. maybe take a global step
is_global_model_updated = self.aggregator.on_client_training_end(
client_delta=client_delta,
final_local_model=final_local_model,
weight=weight,
)
if is_global_model_updated:
self._global_update_done()
def _can_participate(self, staleness: float):
# pyre-fixme[16]: `AsyncTrainer` has no attribute `cfg`.
return staleness <= self.cfg.max_staleness
def _num_global_steps_in_epoch(self):
if isinstance(self.aggregator, FedBuffAggregator):
return math.ceil(self.num_total_users / self.aggregator.cfg.buffer_size)
else:
return self.num_total_users
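    # Example of the arithmetic above (an illustrative note, not original code): with
    # num_total_users = 100 and a FedBuff buffer_size of 3, an epoch spans
    # math.ceil(100 / 3) = 34 global steps; without FedBuff every user update triggers
    # its own global step, giving 100 steps per epoch.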
def _global_update_done(self) -> None:
timeline = self._get_timeline()
self._report_train_metrics(
model=self.global_model(),
timeline=timeline,
metrics_reporter=self.metrics_reporter,
extra_metrics=self._get_training_metrics(),
)
self._calc_post_epoch_communication_metrics(
self._get_timeline(), self.metrics_reporter
)
self.best_metric, self.best_model_state = FLTrainer._maybe_run_evaluation(
self,
timeline=timeline,
data_provider=self.data_provider,
metrics_reporter=self.metrics_reporter,
best_metric=self.best_metric,
best_model_state=self.best_model_state,
)
def _get_training_metrics(self) -> List[Metric]:
metrics = Metric.from_args(
# pyre-fixme[16]: `Optional` has no attribute `queue_stats`.
Concurrency_Rate=self._training_simulator.queue_stats.avg_pending_jobs(),
Seqnum_Diff_Mean=self.aggregator.seqnum_tracker.mean(),
Seqnum_Diff_Std=self.aggregator.seqnum_tracker.standard_deviation(),
Weight_Mean=self.weight.stats.mean(),
Weight_Std=self.weight.stats.standard_deviation(),
)
return (
# pyre-ignore[16]: if aggregator is private then we have privacy budget
metrics + Metric.from_dict(self.aggregator.privacy_budget._asdict())
if self.aggregator.is_private
else metrics
)
def _get_timeline(self):
return Timeline(
global_round=self.global_round,
rounds_per_epoch=self._num_global_steps_in_epoch(),
)
def _print_training_stats(self, timeline: Timeline):
super()._print_training_stats(timeline)
print("Async trainer stats:")
# pyre-fixme[16]: `Optional` has no attribute `print_stats`.
self._training_simulator.print_stats("\t")
self.aggregator.seqnum_tracker.print_stats()
def train(
self,
data_provider: IFLDataProvider,
metrics_reporter: IFLMetricsReporter,
num_total_users: int,
distributed_world_size: int = 1,
rank: int = 0,
) -> Tuple[IFLModel, Any]:
"""Train and eval a model, the model states will be modified.
Args:
train_iter (List[Iterable[Any]]): list of batch iterators of training data,
each batch iterator represents data from a single 'user'
eval_iter (Iterable[Any]): batch iterator of evaluation data
model (Model): model to be trained
metrics_reporter (IFLMetricsReporter): compute metric based on training
output and report results to console, file.. etc
Returns:
model, best_metric: the trained model together with the best metric
"""
assert (
rank == 0 and distributed_world_size == 1
), "Distributed training not supported yet for AsyncTrainer"
self.best_metric = None
self.best_model_state = self.global_model().fl_get_module().state_dict()
self.data_provider = data_provider
self.metrics_reporter = metrics_reporter
self.num_total_users = data_provider.num_train_users()
self.aggregator.set_num_total_users(self.num_total_users)
user_selector = AsyncUserSelectorFactory.create_users_selector(
# pyre-fixme[16]: `AsyncTrainer` has no attribute `cfg`.
self.cfg.async_user_selector_type,
data_provider,
)
self._training_simulator = AsyncTrainingSimulator(
event_generator=self._event_generator,
job_scheduler=self,
user_selector=user_selector,
num_train_end_events_per_epoch=self.num_total_users,
shared_client_config=self.cfg.client,
timeout_simulator=self._timeout_simulator,
channel=self.channel,
cuda_manager=self._cuda_state_manager,
)
num_int_epochs = math.ceil(self.cfg.epochs)
for _epoch in tqdm(range(1, num_int_epochs + 1), desc="Epoch", unit="epoch"):
# pyre-fixme[16]: `Optional` has no attribute `run_one_epoch`.
self._training_simulator.run_one_epoch()
# in k-async, up to (k-1) client updates may not have been aggregated
# into the global model
had_unaggregated_updates = self.aggregator.on_training_epoch_end()
if had_unaggregated_updates:
self._global_update_done()
self.global_model().fl_get_module().load_state_dict(self.best_model_state)
return self.global_model(), self.best_metric
@dataclass
class AsyncTrainerConfig(FLTrainerConfig):
_target_: str = fullclassname(AsyncTrainer)
aggregator: AsyncAggregatorConfig = AsyncAggregatorConfig()
training_event_generator: EventGeneratorConfig = EventGeneratorConfig()
# TODO: async_user_selector_type should be directly instantiable from json_config
async_user_selector_type: AsyncUserSelectorType = AsyncUserSelectorType.RANDOM
async_weight: AsyncWeightConfig = AsyncWeightConfig()
# any client with staleness greater than this number will be rejected
max_staleness: float = float("inf")
    # Minimize CPU<-->GPU memory bandwidth at the cost of increasing GPU memory consumption.
    # Turning this off will increase training time.
minimize_cuda_transfer: bool = True
| canife-main | FLSim/flsim/trainers/async_trainer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import logging
import math
import random
from dataclasses import dataclass
from time import time
from typing import Any, Dict, Iterable, List, Optional, Tuple
import torch
from flsim.channels.message import Message
from flsim.channels.product_quantization_channel import ProductQuantizationChannel
from flsim.clients.base_client import Client
from flsim.clients.dp_client import DPClient, DPClientConfig
from flsim.common.timeline import Timeline
from flsim.data.data_provider import IFLDataProvider
from flsim.interfaces.metrics_reporter import IFLMetricsReporter, Metric, TrainingStage
from flsim.interfaces.model import IFLModel
from flsim.servers.sync_dp_servers import SyncDPSGDServerConfig
from flsim.servers.sync_secagg_servers import SyncSecAggServerConfig
from flsim.servers.sync_servers import (
FedAvgOptimizerConfig,
ISyncServer,
SyncServerConfig,
)
from flsim.trainers.trainer_base import FLTrainer, FLTrainerConfig
from flsim.utils.config_utils import fullclassname, init_self_cfg, is_target
from flsim.utils.distributed.fl_distributed import FLDistributedUtils
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.fl.stats import RandomVariableStatsTracker
from hydra.utils import instantiate
from omegaconf import OmegaConf
from tqdm import tqdm
class SyncTrainer(FLTrainer):
"""Implements synchronous Federated Learning Training.
Defaults to Federated Averaging (FedAvg): https://arxiv.org/abs/1602.05629
"""
def __init__(
self,
*,
model: IFLModel,
cuda_enabled: bool = False,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=SyncTrainerConfig,
**kwargs,
)
super().__init__(model=model, cuda_enabled=cuda_enabled, **kwargs)
self.server: ISyncServer = instantiate(
# pyre-ignore[16]
self.cfg.server,
global_model=model,
channel=self.channel,
)
self.clients = {}
@classmethod
def _set_defaults_in_cfg(cls, cfg):
if OmegaConf.is_missing(cfg.server, "_target_"):
cfg.server = SyncServerConfig(optimizer=FedAvgOptimizerConfig())
def global_model(self) -> IFLModel:
"""self.global_model() is owned by the server, not by SyncTrainer"""
return self.server.global_model
@property
def is_user_level_dp(self):
return is_target(self.cfg.server, SyncDPSGDServerConfig)
@property
def is_sample_level_dp(self):
return is_target(self.cfg.client, DPClientConfig)
@property
def is_secure_aggregation_enabled(self):
return is_target(self.cfg.server, SyncSecAggServerConfig)
def create_or_get_client_for_data(self, dataset_id: int, datasets: Any):
"""Creates clients for a round. This function is called UPR * num_rounds
times per training run. Here, we use <code>OmegaConf.structured</code>
instead of <code>hydra.instantiate</code> to minimize the overhead of
hydra object creation.
"""
if self.is_sample_level_dp:
client = DPClient(
# pyre-ignore[16]
**OmegaConf.structured(self.cfg.client),
dataset=datasets.get_train_user(dataset_id),
name=f"client_{dataset_id}",
timeout_simulator=self._timeout_simulator,
store_last_updated_model=self.cfg.report_client_metrics,
channel=self.channel,
cuda_manager=self._cuda_state_manager,
)
else:
client = Client(
**OmegaConf.structured(self.cfg.client),
dataset=datasets.get_train_user(dataset_id),
name=f"client_{dataset_id}",
timeout_simulator=self._timeout_simulator,
store_last_updated_model=self.cfg.report_client_metrics,
channel=self.channel,
cuda_manager=self._cuda_state_manager,
)
# self.clients[dataset_id] = self.clients.get(dataset_id, client)
self.clients[dataset_id] = client
return self.clients[dataset_id]
def train(
self,
data_provider: IFLDataProvider,
metrics_reporter: IFLMetricsReporter,
num_total_users: int,
distributed_world_size: int,
rank: int = 0,
) -> Tuple[IFLModel, Any]:
"""Trains and evaluates the model, modifying the model state. Iterates over the
number of epochs specified in the config, and for each epoch iterates over the
number of rounds per epoch, i.e. the number of total users divided by the number
of users per round. For each round:
1. Trains the model in a federated way: different local models are trained
with local data from different clients, and are averaged into a new
global model at the end of each round.
2. Evaluates the new global model using evaluation data, if desired.
3. Calculates metrics based on evaluation results and selects the best model.
Args:
data_provider: provides training, evaluation, and test data iterables and
gets a user's data based on user ID
metrics_reporter: computes and reports metrics of interest such as accuracy
or perplexity
num_total_users: number of total users for training
Returns:
model, best_metric: the trained model together with the best metric
Note:
Depending on the chosen active user selector, we may not iterate over
all users in a given epoch.
"""
# set up synchronization utilities for distributed training
FLDistributedUtils.setup_distributed_training(
distributed_world_size, use_cuda=self.cuda_enabled
) # TODO do not call distributed utils here, this is upstream responsibility
self.logger.info(f" dist world size = {distributed_world_size}")
if rank != 0:
FLDistributedUtils.suppress_output()
# pyre-fixme[16]: `SyncTrainer` has no attribute `cfg`.
assert self.cfg.users_per_round % distributed_world_size == 0
best_metric = None
best_model_state = self.global_model().fl_get_module().state_dict()
# last_best_epoch = 0
users_per_round = min(self.cfg.users_per_round, num_total_users)
self.data_provider = data_provider
num_rounds_in_epoch = self.rounds_in_one_epoch(num_total_users, users_per_round)
num_users_on_worker = data_provider.num_train_users()
self.logger.debug(
f"num_users_on_worker: {num_users_on_worker}, "
f"users_per_round: {users_per_round}, "
f"num_total_users: {num_total_users}"
)
# torch.multinomial requires int instead of float, cast it as int
users_per_round_on_worker = int(users_per_round / distributed_world_size)
self._validate_users_per_round(users_per_round_on_worker, num_users_on_worker)
self.logger.info("Start training")
if self.logger.isEnabledFor(logging.DEBUG):
norm = FLModelParamUtils.debug_model_norm(
self.global_model().fl_get_module()
)
            self.logger.debug(
                f"from worker {rank}: model norm is {norm} before training starts"
            )
# main training loop
num_int_epochs = math.ceil(self.cfg.epochs)
for epoch in tqdm(
range(1, num_int_epochs + 1), desc="Epoch", unit="epoch", position=0
):
for round in tqdm(
range(1, num_rounds_in_epoch + 1),
desc="Round",
unit="round",
position=0,
):
timeline = Timeline(
epoch=epoch, round=round, rounds_per_epoch=num_rounds_in_epoch
)
t = time()
mock_client_idx = random.randint(0, num_users_on_worker - 1)
clients = self._client_selection(
num_users_on_worker,
users_per_round_on_worker,
data_provider,
self.global_model(),
epoch,
mock_client_idx
)
self.logger.info(f"Client Selection took: {time() - t} s.")
agg_metric_clients = self._choose_clients_for_post_aggregation_metrics(
train_clients=clients,
num_total_users=num_users_on_worker,
users_per_round=users_per_round_on_worker,
)
# training on selected clients for the round
self.logger.info(f"# clients/round on worker {rank}: {len(clients)}.")
self._train_one_round(
timeline=timeline,
clients=clients,
agg_metric_clients=agg_metric_clients,
users_per_round=users_per_round,
metrics_reporter=metrics_reporter
if self.cfg.report_train_metrics
else None,
mock_client_idx=mock_client_idx,
)
if self.logger.isEnabledFor(logging.DEBUG):
norm = FLModelParamUtils.debug_model_norm(
self.global_model().fl_get_module()
)
                    self.logger.debug(
                        f"from worker {rank}: model norm: {norm} @ "
                        f"epoch:{epoch}, round:{round}"
                    )
# report training success rate and training time variance
if rank == 0:
if (
self._timeout_simulator.sample_mean_per_user != 0
or self._timeout_simulator.sample_var_per_user != 0
):
                        self.logger.info(
                            f"mean training time/user: "
                            f"{self._timeout_simulator.sample_mean_per_user}, "
                            f"variance of training time/user: "
                            f"{self._timeout_simulator.sample_var_per_user}"
                        )
t = time()
(best_metric, best_model_state,) = self._maybe_run_evaluation(
timeline=timeline,
data_provider=data_provider,
metrics_reporter=metrics_reporter,
best_metric=best_metric,
best_model_state=best_model_state,
)
self.logger.info(f"Evaluation took {time() - t} s.")
if self.stop_fl_training(
epoch=epoch, round=round, num_rounds_in_epoch=num_rounds_in_epoch
):
break
# pyre-fixme[61]: `timeline` may not be initialized here.
self._report_post_epoch_client_metrics(timeline, metrics_reporter)
if self.stop_fl_training(
epoch=epoch,
round=round, # pyre-fixme[61]: `round` may not be initialized here.
num_rounds_in_epoch=num_rounds_in_epoch,
):
break
if rank == 0 and best_metric is not None:
self._save_model_and_metrics(self.global_model(), best_model_state)
return self.global_model(), best_metric
def stop_fl_training(self, *, epoch, round, num_rounds_in_epoch) -> bool:
"""Stops FL training when the necessary number of steps/epochs have been
completed in case of fractional epochs or if clients time out.
"""
global_round_num = (epoch - 1) * num_rounds_in_epoch + round
return (
(global_round_num / num_rounds_in_epoch)
>= self.cfg.epochs # pyre-fixme[16]: `SyncTrainer` has no attribute `cfg`.
or self._timeout_simulator.stop_fl()
)
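    # Example of the fractional-epoch stop above (an illustrative note, not original
    # code): with cfg.epochs = 0.5 and 10 rounds per epoch, global_round_num divided by
    # num_rounds_in_epoch reaches 0.5 at round 5, so training stops after 5 rounds even
    # though the round loop would otherwise continue.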
def _drop_overselected_users(
        self, clients_triggered: List[Client], num_users_keep: int
) -> List[Client]:
"""Sorts users by their training time, and only keeps num_users_keep users."""
        all_training_times = [c.get_total_training_time() for c in clients_triggered]
all_training_times.sort()
# only select first num_users_keep userids sort by their finish time
num_users_keep = min([num_users_keep, len(all_training_times)])
last_user_time = all_training_times[num_users_keep - 1]
num_users_added = 0
clients_used = []
        for c in clients_triggered:
# if two clients finished at the same time, order for entering
# the cohort is arbitrary
if (c.get_total_training_time() <= last_user_time) and (
num_users_added < num_users_keep
):
num_users_added += 1
clients_used.append(c)
return clients_used
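    # Example of over-selection (an illustrative note, not original code): with
    # users_per_round = 10 and dropout_rate = 0.8, _client_selection over-selects
    # math.ceil(10 / 0.8) = 13 clients and _drop_overselected_users keeps only the
    # 10 that finish training first, simulating straggling clients being dropped.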
def _client_selection(
self,
num_users: int,
users_per_round: int,
data_provider: IFLDataProvider,
global_model: IFLModel,
epoch: int,
client_to_exclude: int,
) -> List[Client]:
# pyre-fixme[16]: `SyncTrainer` has no attribute `cfg`.
num_users_overselected = math.ceil(users_per_round / self.cfg.dropout_rate)
user_indices_overselected = self.server.select_clients_for_training(
num_total_users=num_users,
users_per_round=num_users_overselected,
data_provider=data_provider,
epoch=epoch,
)
if client_to_exclude in user_indices_overselected:
user_indices_overselected.remove(client_to_exclude)
clients_triggered = [
self.create_or_get_client_for_data(i, self.data_provider)
for i in user_indices_overselected
]
clients_to_train = self._drop_overselected_users(
clients_triggered, users_per_round
)
return clients_to_train
def _mock_client_selection(self, client_idx):
return self.create_or_get_client_for_data(client_idx, self.data_provider)
def _save_model_and_metrics(self, model: IFLModel, best_model_state):
model.fl_get_module().load_state_dict(best_model_state)
def _train_one_round(
self,
timeline: Timeline,
clients: Iterable[Client],
agg_metric_clients: Iterable[Client],
users_per_round: int,
metrics_reporter: Optional[IFLMetricsReporter],
mock_client_idx: int,
) -> None:
"""Trains the global model for one training round.
Args:
timeline: information about the round, epoch, round number, ...
clients: clients for the round
agg_metric_clients: clients for evaluating the post-aggregation
training metrics
users_per_round: the number of participating users
metrics_reporter: the metric reporter to pass to other methods
"""
t = time()
self.server.init_round()
self.logger.info(f"Round initialization took {time() - t} s.")
def update(client):
client_delta, weight = client.generate_local_update(
self.global_model(), metrics_reporter
)
self.server.receive_update_from_client(Message(client_delta, weight))
if isinstance(self.channel, ProductQuantizationChannel):
mock_client = self._mock_client_selection(mock_client_idx)
# print(f"Selected mock client has {mock_client.dataset.num_train_examples()} samples")
mock_delta, _ = mock_client.generate_local_update(self.global_model())
self.server._init_and_boradcast_qparams(mock_delta)
t = time()
for client in clients:
update(client)
self.logger.info(f"Collecting round's clients took {time() - t} s.")
t = time()
self.server.step()
self.logger.info(f"Finalizing round took {time() - t} s.")
t = time()
self._report_train_metrics(
model=self.global_model(),
timeline=timeline,
metrics_reporter=metrics_reporter,
)
self._evaluate_global_model_after_aggregation_on_train_clients(
clients=agg_metric_clients,
model=self.global_model(),
timeline=timeline,
users_per_round=users_per_round,
metrics_reporter=metrics_reporter,
)
self._calc_post_epoch_communication_metrics(
timeline,
metrics_reporter,
)
self.logger.info(f"Aggregate round reporting took {time() - t} s.")
def _choose_clients_for_post_aggregation_metrics(
self,
train_clients: Iterable[Client],
num_total_users: int,
users_per_round: int,
) -> Iterable[Client]:
"""Chooses clients for the post-aggregation training metrics.
Depending on the config parameters, either returns the round's
training clients or new randomly drawn clients.
"""
# pyre-fixme[16]: `SyncTrainer` has no attribute `cfg`.
if self.cfg.use_train_clients_for_aggregation_metrics:
return train_clients
# For the post-aggregation metrics, evaluate on new users
agg_metric_client_idcs = torch.multinomial(
torch.ones(num_total_users, dtype=torch.float),
users_per_round,
replacement=False,
).tolist()
agg_metric_clients = [
self.create_or_get_client_for_data(i, self.data_provider)
for i in agg_metric_client_idcs
]
return agg_metric_clients
def _calc_privacy_metrics(
self,
clients: Iterable[Client],
model: IFLModel,
metrics_reporter: Optional[IFLMetricsReporter],
) -> List[Metric]:
"""Calculates privacy metrics."""
metrics = []
if self.is_user_level_dp:
user_eps = self.server.privacy_budget.epsilon # pyre-fixme
metrics.append(Metric("user level dp (eps)", user_eps))
if self.is_sample_level_dp:
# calculate sample level dp privacy loss statistics.
all_client_eps = torch.Tensor(
[c.privacy_budget.epsilon for c in clients] # pyre-fixme
)
mean_client_eps = all_client_eps.mean()
max_client_eps = all_client_eps.max()
min_client_eps = all_client_eps.min()
p50_client_eps = torch.median(all_client_eps)
sample_dp_metrics: List[Metric] = Metric.from_args(
mean=mean_client_eps,
min=min_client_eps,
max=max_client_eps,
median=p50_client_eps,
)
metrics.append(Metric("sample level dp (eps)", sample_dp_metrics))
return metrics
def _calc_overflow_metrics(
self,
clients: Iterable[Client],
model: IFLModel,
users_per_round: int,
metrics_reporter: Optional[IFLMetricsReporter],
) -> List[Metric]:
"""Calculates overflow metrics."""
metrics = []
if self.is_secure_aggregation_enabled:
for client in clients:
client.eval(model=model, metrics_reporter=metrics_reporter)
(
convert_overflow_perc,
aggregate_overflow_perc,
) = self.server.calc_avg_overflow_percentage( # pyre-fixme
users_per_round, model
)
overflow_metrics: List[Metric] = Metric.from_args(
convert_overflow_percentage=convert_overflow_perc,
aggregate_overflow_percentage=aggregate_overflow_perc,
)
metrics.append(Metric("overflow per round", overflow_metrics))
return metrics
def _calc_post_epoch_client_metrics(
self,
client_models: Dict[Client, IFLModel],
round_timeline: Timeline,
metrics_reporter: IFLMetricsReporter,
) -> List[List[Metric]]:
"""Calculates client-side metrics on the overall evaluation set."""
client_metrics = []
if metrics_reporter is not None:
for client, model in tqdm(client_models.items()):
metrics_reporter.reset()
client.eval(
model=model,
metrics_reporter=metrics_reporter,
)
# pyre-fixme[16]: `IFLMetricsReporter` has no attribute
# `compute_scores`.
score = metrics_reporter.compute_scores()
client_metrics.append(Metric.from_dict(score))
return client_metrics
def _evaluate_global_model_after_aggregation_on_train_clients(
self,
clients: Iterable[Client],
model: IFLModel,
timeline: Timeline,
users_per_round: int,
metrics_reporter: Optional[IFLMetricsReporter] = None,
):
if (
metrics_reporter is not None
# pyre-fixme[16]: `SyncTrainer` has no attribute `cfg`.
and self.cfg.report_train_metrics
and self.cfg.report_train_metrics_after_aggregation
and timeline.tick(1.0 / self.cfg.train_metrics_reported_per_epoch)
):
with torch.no_grad():
model.fl_get_module().eval()
for client in clients:
for batch in client.dataset.train_data():
batch_metrics = model.get_eval_metrics(batch)
if metrics_reporter is not None:
metrics_reporter.add_batch_metrics(batch_metrics)
model.fl_get_module().train()
privacy_metrics = self._calc_privacy_metrics(
clients, model, metrics_reporter
)
overflow_metrics = self._calc_overflow_metrics(
clients, model, users_per_round, metrics_reporter
)
metrics_reporter.report_metrics(
model=model,
reset=True,
stage=TrainingStage.AGGREGATION,
timeline=timeline,
epoch=timeline.global_round_num(), # for legacy
print_to_channels=True,
extra_metrics=privacy_metrics + overflow_metrics,
)
def _validate_users_per_round(
self, users_per_round_on_worker: int, num_users_on_worker: int
):
assert users_per_round_on_worker <= num_users_on_worker, (
"Users per round is greater than the number of users in the data provider for the worker."
"If you are using paged dataloader, increase your num_users_per_page >> users_per_round"
)
def _report_post_epoch_client_metrics(
self,
timeline: Timeline,
metrics_reporter: Optional[IFLMetricsReporter],
):
if (
metrics_reporter is not None
# pyre-fixme[16]: `SyncTrainer` has no attribute `cfg`.
and self.cfg.report_client_metrics
and self.cfg.report_client_metrics_after_epoch
and (timeline.epoch % self.cfg.client_metrics_reported_per_epoch == 0)
):
client_models = {
client: client.last_updated_model for client in self.clients.values()
}
client_scores = self._calc_post_epoch_client_metrics(
client_models, timeline, metrics_reporter
)
# Find stats over the client_metrics (mean, min, max, median, std)
client_stats_trackers = {}
score_names = [metric.name for metric in next(iter(client_scores))]
for score_name in score_names:
client_stats_trackers[score_name] = RandomVariableStatsTracker(
tracks_quantiles=True
)
for client_metric_list in client_scores:
for client_metric in client_metric_list:
client_stats_trackers[client_metric.name].update(
client_metric.value
)
reportable_client_metrics = []
for score_name in score_names:
for stat_name, stat_key in [
("Mean", "mean_val"),
("Median", "median_val"),
("Upper Quartile", "upper_quartile_val"),
("Lower Quartile", "lower_quartile_val"),
("Min", "min_val"),
("Max", "max_val"),
("Standard Deviation", "standard_deviation_val"),
("Num Samples", "num_samples"),
]:
score = client_stats_trackers[score_name].__getattribute__(stat_key)
reportable_client_metrics.append(Metric(stat_name, score))
metrics_reporter.report_metrics(
model=None,
reset=True,
stage=TrainingStage.PER_CLIENT_EVAL,
timeline=timeline,
epoch=timeline.global_round_num(), # for legacy
print_to_channels=True,
extra_metrics=reportable_client_metrics,
)
@staticmethod
def rounds_in_one_epoch(num_total_users: int, users_per_round: int) -> int:
return math.ceil(num_total_users / users_per_round)
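    # Example (an illustrative note, not original code): 103 total users with
    # users_per_round = 10 gives math.ceil(103 / 10) = 11 rounds in one epoch; as the
    # train() docstring notes, the active user selector determines whether every user
    # is actually visited within that epoch.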
def force_print(is_distributed: bool, *args, **kwargs) -> None:
if is_distributed:
try:
device_info = f" [device:{torch.cuda.current_device()}]"
# pyre-fixme[28]: Unexpected keyword argument `force`.
print(*args, device_info, **kwargs, force=True)
except TypeError:
pass
else:
print(*args, **kwargs)
@dataclass
class SyncTrainerConfig(FLTrainerConfig):
_target_: str = fullclassname(SyncTrainer)
server: SyncServerConfig = SyncServerConfig()
users_per_round: int = 10
# overselect users_per_round / dropout_rate users, only use first
# users_per_round updates
dropout_rate: float = 1.0
report_train_metrics_after_aggregation: bool = False
report_client_metrics_after_epoch: bool = False
# Whether client metrics on eval data should be computed and reported.
report_client_metrics: bool = False
# how many times per epoch should we report client metrics
# numbers greater than 1 help with plotting more precise training curves
client_metrics_reported_per_epoch: int = 1
| canife-main | FLSim/flsim/trainers/sync_trainer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import copy
import math
import random
import sys
from dataclasses import dataclass
from time import time
from typing import Any, Dict, Iterable, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from flsim.channels.message import Message
from flsim.clients.base_client import Client
from flsim.clients.dp_client import DPClient
from flsim.common.timeline import Timeline
from flsim.data.data_provider import IFLDataProvider
from flsim.interfaces.metrics_reporter import IFLMetricsReporter, Metric, TrainingStage
from flsim.interfaces.model import IFLModel
from flsim.servers.sync_servers import ISyncServer, SyncServerConfig
from flsim.trainers.sync_trainer import SyncTrainer
from flsim.trainers.trainer_base import FLTrainerConfig
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.cuda import GPUMemoryMinimizer
from flsim.utils.distributed.fl_distributed import FLDistributedUtils
from flsim.utils.fl.stats import RandomVariableStatsTracker
from hydra.utils import instantiate
from omegaconf import OmegaConf
from opacus import GradSampleModule
from opacus.accountants.utils import get_noise_multiplier
from opacus.validators import ModuleValidator
from torch.utils.data import DataLoader, TensorDataset
from torchvision.transforms.functional import normalize
from tqdm import tqdm
sys.path.append("../../canife")
from canife import CanaryAnalyser, CanaryDesigner, CanaryDesignerNLP
from canife.utils import display_gpu_mem
class CanarySyncTrainer(SyncTrainer):
"""Implements synchronous Federated Learning Training.
Defaults to Federated Averaging (FedAvg): https://arxiv.org/abs/1602.05629
"""
def __init__(
self,
*,
model: IFLModel,
cuda_enabled: bool = False,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=CanarySyncTrainerConfig,
**kwargs,
)
super().__init__(model=model, cuda_enabled=cuda_enabled, **kwargs)
if self.cfg.args.gpu_mem_minimiser:
self._cuda_state_manager = GPUMemoryMinimizer(cuda_enabled)
self._cuda_state_manager.on_trainer_init(model)
self.server: ISyncServer = instantiate(
# pyre-ignore[16]
self.cfg.server,
global_model=model,
channel=self.channel,
)
self.clients = {}
self.mock_clients = {} # For canary design
self.accuracy_metrics = {"train": [], "agg": [], "eval": [], "test": []}
self.insert_acc_achieved = False
self.initial_privacy_budget = None # For checkpointing priv budget
self.one_step_budget = None # For comparing empirical epsilon with single step epsilon
def global_model(self) -> IFLModel:
"""self.global_model() is owned by the server, not by SyncTrainer"""
return self.server.global_model
def create_or_get_client_for_data(self, dataset_id: int, datasets: Any):
"""Creates clients for a round. This function is called UPR * num_rounds
times per training run. Here, we use <code>OmegaConf.structured</code>
instead of <code>hydra.instantiate</code> to minimize the overhead of
hydra object creation.
"""
if self.is_sample_level_dp:
client = DPClient(
# pyre-ignore[16]
**OmegaConf.structured(self.cfg.client),
dataset=datasets.get_train_user(dataset_id),
name=f"client_{dataset_id}",
timeout_simulator=self._timeout_simulator,
store_last_updated_model=self.cfg.report_client_metrics,
channel=self.channel,
cuda_manager=self._cuda_state_manager,
)
else:
client = Client(
**OmegaConf.structured(self.cfg.client),
dataset=datasets.get_train_user(dataset_id),
name=f"client_{dataset_id}",
timeout_simulator=self._timeout_simulator,
store_last_updated_model=self.cfg.report_client_metrics,
channel=self.channel,
cuda_manager=self._cuda_state_manager,
)
# self.clients[dataset_id] = client
# return self.clients[dataset_id]
return self.clients.get(dataset_id, client)
def _create_mock_client(self, dataset_id: int, datasets: Any):
"""
"""
if self.is_sample_level_dp:
client = DPClient(
# pyre-ignore[16]
**OmegaConf.structured(self.cfg.client),
dataset=datasets.get_test_user(dataset_id),
name=f"mock_client_{dataset_id}",
timeout_simulator=self._timeout_simulator,
store_last_updated_model=self.cfg.report_client_metrics,
channel=self.channel,
cuda_manager=self._cuda_state_manager,
)
else:
client = Client(
**OmegaConf.structured(self.cfg.client),
dataset=datasets.get_test_user(dataset_id),
name=f"mock_client_{dataset_id}",
timeout_simulator=self._timeout_simulator,
store_last_updated_model=self.cfg.report_client_metrics,
channel=self.channel,
cuda_manager=self._cuda_state_manager,
)
return self.mock_clients.get(dataset_id, client)
def _get_checkpoint_path(self, epoch, round, final=False):
canary_type = self.cfg.args.canary_design_type
filename = self.cfg.args.checkpoint_path
if final:
filename += f"/FLSim_dp={self.cfg.args.dp_level}_model={self.cfg.args.model_arch}_dataset={self.cfg.args.dataset}_num_clients={self.cfg.args.users_per_round}_test_size={self.cfg.args.local_batch_size}"
filename += f"_insert_test_acc={self.cfg.args.canary_insert_test_acc}_insert_train_acc={self.cfg.args.canary_insert_train_acc}_client_epochs={self.cfg.args.client_epochs}"
else:
filename += "/" + "FLSim_" + self.cfg.args.dp_level + "_epoch=" +str(epoch) + "_round=" + str(round) + "_" + self.cfg.args.model_arch + "_" + canary_type
if self.cfg.args.epsilon != -1 or self.cfg.args.sigma != 0:
if self.cfg.args.epsilon != -1:
filename += f"_private_eps={self.cfg.args.epsilon}_delta={self.cfg.args.delta}"
else:
filename += f"_private_sigma={self.cfg.args.sigma}_delta={self.cfg.args.delta}"
filename += ".tar"
return filename
def _checkpoint_model(self, epoch, round, final=False, mock_client_indices=None):
privacy_budget = self.server._privacy_engine.get_privacy_spent()
train_acc = self.accuracy_metrics["train"][-1] if len(self.accuracy_metrics["train"]) > 0 else 0
test_acc = self.accuracy_metrics["test"][-1] if len(self.accuracy_metrics["test"]) > 0 else 0
checkpoint = {
"epoch": epoch,
"round": round,
"noise_multiplier": self.server._privacy_engine.noise_multiplier,
"epsilon": privacy_budget.epsilon,
"delta": privacy_budget.delta,
"steps": self.server._privacy_engine.steps,
"state_dict": self.global_model().fl_get_module().state_dict(),
"mock_client_indices": mock_client_indices,
"train_acc": train_acc,
"test_acc": test_acc,
}
checkpoint_path = self._get_checkpoint_path(epoch, round, final=True)
torch.save(checkpoint, checkpoint_path)
self.logger.info(f"Checkpoint saved to {checkpoint_path}")
# Client selection
def _client_selection(
self,
num_users: int,
users_per_round: int,
data_provider: IFLDataProvider,
global_model: IFLModel,
epoch: int,
clients_to_exclude: list,
) -> List[Client]:
# pyre-fixme[16]: `SyncTrainer` has no attribute `cfg`.
num_users_overselected = math.ceil(users_per_round / self.cfg.dropout_rate)
user_indices_overselected = []
user_indices_overselected = self.server.select_clients_for_training(
num_total_users=num_users-len(clients_to_exclude),
users_per_round=num_users_overselected,
data_provider=data_provider,
epoch=epoch,
)
for i, selected_client in enumerate(user_indices_overselected):
for j, excluded_client in enumerate(clients_to_exclude):
if selected_client == excluded_client:
user_indices_overselected[i] = num_users-len(clients_to_exclude) + j
clients_triggered = [
self.create_or_get_client_for_data(i, self.data_provider)
for i in user_indices_overselected
]
clients_to_train = self._drop_overselected_users(
clients_triggered, users_per_round
)
return clients_to_train
def _select_mock_client_indices(self, num_users, users_per_round):
num_users = self.data_provider.num_test_users()
batches_per_client, samples_per_batch = -1,-1
if self.cfg.args.canary_design_type == "model_updates" and self.cfg.args.canary_design_pool_size > 0:
num_mock_clients = self.cfg.args.canary_design_pool_size
mock_client_indices = np.random.choice(range(0, num_users), num_mock_clients, replace=False)
else:
shuffled_clients = np.random.permutation(range(0, num_users))
total_samples = 0
for i, client_id in enumerate(shuffled_clients):
client = self._create_mock_client(client_id, self.data_provider)
total_samples += client.dataset.num_eval_examples()
if total_samples >= self.cfg.args.canary_design_sample_size:
break
mock_client_indices = shuffled_clients[:i+1]
self.logger.info(f"Design Pool Size: {self.cfg.args.canary_design_pool_size}, Number of mock clients needed: {len(mock_client_indices)}")
self.logger.info(f"Local updates per client {batches_per_client}, Samples per update {samples_per_batch}")
return mock_client_indices, batches_per_client, samples_per_batch
def _select_mock_clients(self, client_indices):
return [self._create_mock_client(client_idx, self.data_provider) for client_idx in client_indices]
# Designing and Pushing the canary
def _create_canary_design_loader(self, mock_clients, exact_testing=False):
# If the design requires model updates, batch per design client
if self.cfg.args.canary_design_type != "sample_grads":
design_loader = []
for i, mock_client in enumerate(mock_clients):
if exact_testing:
local_batch = [(batch["features"], batch["labels"].long()) for batch in mock_client.dataset._user_batches] # Note: If exact, mock clients are real clients from train set
else:
local_batch = [(batch["features"], batch["labels"].long()) for batch in mock_client.dataset._eval_batches] # Note: Mock clients are from the test set so only contain _eval_batches (not _user_batches)
design_loader.append(local_batch)
self.logger.info(f"Design Loader {len(design_loader)}")
else:
if not exact_testing:
design_x = torch.vstack([torch.vstack([mock_client.dataset._eval_batches[i]["features"] for i in range(len(mock_client.dataset._eval_batches))]) for mock_client in mock_clients])
design_y = torch.hstack([torch.hstack([mock_client.dataset._eval_batches[i]["labels"] for i in range(len(mock_client.dataset._eval_batches))]) for mock_client in mock_clients]).long()
design_x = design_x[:self.cfg.args.canary_design_sample_size]
design_y = design_y[:self.cfg.args.canary_design_sample_size]
batch_size = self.cfg.args.canary_design_minibatch_size
else:
design_x = torch.vstack([torch.vstack([mock_client.dataset._user_batches[i]["features"] for i in range(len(mock_client.dataset._user_batches))]) for mock_client in mock_clients])
design_y = torch.hstack([torch.hstack([mock_client.dataset._user_batches[i]["labels"] for i in range(len(mock_client.dataset._user_batches))]) for mock_client in mock_clients]).long()
batch_size = design_x.shape[0]
design_loader = DataLoader(TensorDataset(design_x, design_y), batch_size=batch_size , shuffle=True)
self.logger.info(f"Canary Design Pool x {design_x.shape}, Canary Design Pool y {design_y.shape}")
return design_loader
def _push_canary_to_client(self, clients, canary, target_batch_idx=-1, insert_as_batch=False, type="canary"):
canary_data = canary.data if type=="canary" else canary.init_data
canary_class = canary.class_label
# select target client at random
target_client = random.choice(clients)
if self.cfg.args.canary_setup == "exact":
target_client = clients[0] # For debugging
self.logger.info(f"Inserting canary to client {target_client.name}")
if insert_as_batch: # replace the batch with a single canary sample
old_data = copy.deepcopy(target_client.dataset._user_batches)
target_client.dataset._user_batches= [{"features": canary_data, "labels": torch.tensor(canary_class).unsqueeze(0)}]
else: # insert canary as the first sample in the target_batch_idx batch
# test canary shape
batch_sizes = target_client.dataset._user_batches[target_batch_idx]["features"].shape
assert canary_data.shape[1:] == batch_sizes[1:], "Wrong canary size"
# test target batch idx
num_batches = len(target_client.dataset._user_batches)
assert target_batch_idx < num_batches, "Wrong target_batch_idx"
old_x, old_y = target_client.dataset._user_batches[target_batch_idx]["features"][0].clone(), target_client.dataset._user_batches[target_batch_idx]["labels"][0].clone()
old_data = (old_x, old_y)
target_client.dataset._user_batches[target_batch_idx]["features"][0] = canary_data
target_client.dataset._user_batches[target_batch_idx]["labels"][0] = canary_class
target_client.flag_as_canary_inserted() # Forces canary client to take a single local epoch
return target_client, old_data
def _design_canary_from_mock_clients(self, mock_clients, canary_designer, model=None, exact_testing=False):
"""
        Generates a canary from the mock clients' local datasets using a CanaryDesigner
Args:
- mock_clients: List of clients to design the canary from (will be excluded from training)
- canary_designer: CanaryDesigner
"""
# Create design batches from chosen mock client(s)
self.logger.info(f"Designing canary from mock clients {[client.name for client in mock_clients]}")
canary_design_loader = self._create_canary_design_loader(mock_clients, exact_testing=exact_testing)
# Craft canary
model = self.global_model().fl_get_module() if model is None else model
canary = canary_designer.design(model, F.cross_entropy, canary_design_loader, clients_per_round=self.cfg.users_per_round, canary_design_minibatch_size=self.cfg.args.canary_design_minibatch_size, varying_local_batches=(self.cfg.args.num_local_updates==-1), device=self.global_model().device)
return canary
def count_canaries(self, clients, canary):
canary_count = 0
for client in clients:
for batch in client.dataset._user_batches:
for sample in batch["features"]:
if torch.equal(sample, canary.data[0]):
canary_count += 1
self.logger.debug(f"Client {client.name} has a canary")
self.logger.debug(f"Client has {len(batch['features'])} samples")
self.logger.debug(f"Canary count {canary_count}")
def _design_canary(self, canary_designer, mock_clients, exact_testing=False):
# Note: Deepcopying the fl_get_module() between epochs breaks Opacus' GradSampleModule()... safer to load state_dict
model_args = {"num_classes": self.cfg.args.num_classes, "dropout_rate": self.cfg.args.fl_dropout} | vars(self.global_model().fl_get_module())
if self.cfg.args.dataset == "femnist":
model_args["in_channels"] = 1
design_model = self.global_model().fl_get_module().__class__(**model_args)
design_model.load_state_dict(self.global_model().fl_get_module().state_dict())
design_model = ModuleValidator.fix_and_validate(design_model)
canary_designer.set_grad_sample_module(GradSampleModule(copy.deepcopy(design_model)))
# Generate canary
canary = self._design_canary_from_mock_clients(mock_clients, canary_designer, model=design_model, exact_testing=exact_testing)
return canary
def _get_global_round_num(self, epoch, round, num_rounds_in_epoch):
return (epoch - 1) * num_rounds_in_epoch + round
def _setup_canary_designer(self, checkpoint=None):
self.cfg.args.server_clip_const = float("inf")
if self.is_user_level_dp or not self.cfg.args.canary_design_reverse_server_clip or "privacy_setting" in self.server.cfg:
self.cfg.args.server_clip_const = self.server.cfg.privacy_setting.clipping_value
self.image_size = 32
self.in_channels = 3
if self.cfg.args.dataset in ["sent140", "shakespeare"]:
canary_designer = CanaryDesignerNLP(None, logger=self.logger, local_updates=self.cfg.args.num_local_updates, local_epochs=self.cfg.args.client_epochs, optimizer_config=self.cfg.client.optimizer, client_lr=self.cfg.client.optimizer.lr,
**self.cfg.args)
else:
if self.cfg.args.dataset == "CIFAR10":
canary_preprocess = lambda x: normalize(x, (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) # noqa
elif self.cfg.args.dataset == "femnist":
canary_preprocess = lambda x: normalize(x, (0.1307,), (0.3081, )) # noqa
self.image_size = 28
self.in_channels=1
else:
canary_preprocess = lambda x: normalize(x, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # noqa
if self.cfg.args.prettify_samples:
canary_preprocess = lambda x: x # noqa
self.image_size = 128 if self.cfg.args.dataset == "celeba" else self.image_size
canary_designer = CanaryDesigner(None, logger=self.logger, local_updates=self.cfg.args.num_local_updates, local_epochs=self.cfg.args.client_epochs, optimizer_config=self.cfg.client.optimizer, client_lr=self.cfg.client.optimizer.lr,
in_channels=self.in_channels, image_size=self.image_size, canary_preprocess=canary_preprocess, **self.cfg.args)
# Create analyser
args = OmegaConf.to_container(copy.deepcopy(self.cfg.args))
args.pop("plot_path")
analyser_args = args | canary_designer.get_analyser_args() #py3.9
analyser_args["grad_sample_module"] = None
if checkpoint:
analyser_args["checkpoint_train_acc"] = checkpoint["train_acc"]
analyser_args["checkpoint_test_acc"] = checkpoint["test_acc"]
canary_analyser = CanaryAnalyser(plot_path=self.cfg.plot_path, result_path=self.cfg.result_path, **analyser_args)
return canary_designer, canary_analyser
def _cleanup_post_canary_test(self, clients, target_client, replaced_data):
if self.cfg.args.insert_canary_as_batch:
target_client.dataset._user_batches = replaced_data
else:
target_client.dataset._user_batches[self.cfg.args.canary_insert_batch_index]["features"][0] = replaced_data[0]
target_client.dataset._user_batches[self.cfg.args.canary_insert_batch_index]["labels"][0] = replaced_data[1]
target_client.has_canary_inserted = False
for client in clients:
client.disable_canary_testing()
def _initialise_privacy_budget(self, checkpoint, num_rounds_in_epoch, num_int_epochs):
if self.cfg.args.epsilon > 0 and not self.cfg.args.fl_load_checkpoint: # For calibrating sigma to training a model for the full epochs
steps = num_int_epochs * num_rounds_in_epoch if not checkpoint else checkpoint["steps"]
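            # Calibrate the noise multiplier (sigma) so that running `steps` rounds at the
            # privacy engine's user sampling rate spends at most (target_epsilon, target_delta).
            # This assumes get_noise_multiplier follows the Opacus-style accountant interface.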
noise_multiplier = get_noise_multiplier(steps=steps, target_epsilon=self.cfg.args.epsilon, target_delta=self.server._privacy_engine.target_delta, sample_rate=self.server._privacy_engine.user_sampling_rate)
self.logger.info(f"Noise multiplier updated={noise_multiplier}")
self.server._privacy_engine.noise_multiplier = noise_multiplier
self.cfg.args.sigma = noise_multiplier
else:
self.server._privacy_engine.noise_multiplier = self.cfg.args.sigma # Otherwise use noise multiplier
# Restore privacy parameters from checkpoint
if checkpoint:
self.server._privacy_engine.target_delta = checkpoint["delta"]
self.server._privacy_engine.noise_multiplier = checkpoint["noise_multiplier"]
self.server._privacy_engine.steps = checkpoint["steps"] if checkpoint["steps"] != -1 else (checkpoint["epoch"]-1) * num_rounds_in_epoch + (checkpoint["round"]-1)
priv_budget = self.server._privacy_engine.get_privacy_spent() # Checkpointed priv budget
self.initial_privacy_budget = {"epsilon": priv_budget.epsilon, "delta": priv_budget.delta, "sigma": self.server._privacy_engine.noise_multiplier}
self.logger.info(f"Initial priv budget {self.initial_privacy_budget}, sigma arg={self.cfg.args.sigma}")
if self.cfg.args.override_noise_multiplier: # If we are overriding the noise multiplier
self.server._privacy_engine.noise_multiplier = self.cfg.args.sigma
priv_budget = self.server._privacy_engine.get_privacy_spent() # Update priv budget
self.server.privacy_budget = priv_budget
sampling_rate = self.server._privacy_engine.user_sampling_rate
self.server._privacy_engine.user_sampling_rate = 1
self.server._privacy_engine.steps = 1
one_step_budget = self.server._privacy_engine.get_privacy_spent()
self.one_step_budget = {"epsilon": one_step_budget.epsilon, "delta": one_step_budget.delta, "sigma": self.server._privacy_engine.noise_multiplier} # Update priv budget
self.server._privacy_engine.user_sampling_rate = sampling_rate
self.server._privacy_engine.steps = 0
self.canary_analyser.sample_rate = self.server._privacy_engine.user_sampling_rate # Set sample rate in analyser for checkpointing
def train(
self,
data_provider: IFLDataProvider,
metrics_reporter: IFLMetricsReporter,
num_total_users: int,
distributed_world_size: int,
rank: int = 0,
checkpoint=None,
) -> Tuple[IFLModel, Any]:
"""Trains and evaluates the model, modifying the model state. Iterates over the
number of epochs specified in the config, and for each epoch iterates over the
number of rounds per epoch, i.e. the number of total users divided by the number
of users per round. For each round:
1. Trains the model in a federated way: different local models are trained
with local data from different clients, and are averaged into a new
global model at the end of each round.
2. Evaluates the new global model using evaluation data, if desired.
3. Calculates metrics based on evaluation results and selects the best model.
Args:
data_provider: provides training, evaluation, and test data iterables and
gets a user's data based on user ID
metrics_reporter: computes and reports metrics of interest such as accuracy
or perplexity
num_total_users: number of total users for training
Returns:
model, best_metric: the trained model together with the best metric
Note:
Depending on the chosen active user selector, we may not iterate over
all users in a given epoch.
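        Example (illustrative sketch only; config names and setup vary by experiment):
            trainer = instantiate(cfg.trainer, model=global_model, cuda_enabled=True)
            model, best_metric = trainer.train(
                data_provider=data_provider,
                metrics_reporter=metrics_reporter,
                num_total_users=data_provider.num_train_users(),
                distributed_world_size=1,
            )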
"""
self.logger.info("Starting training...")
self.logger.info(f"Client LR {self.cfg.client.optimizer.lr}, Server LR {self.cfg.server.server_optimizer.lr}")
# set up synchronization utilities for distributed training
FLDistributedUtils.setup_distributed_training(
distributed_world_size, use_cuda=self.cuda_enabled
) # TODO do not call distributed utils here, this is upstream responsibility
self.logger.info(f" dist world size = {distributed_world_size}")
if rank != 0:
FLDistributedUtils.suppress_output()
# pyre-fixme[16]: `SyncTrainer` has no attribute `cfg`.
assert self.cfg.users_per_round % distributed_world_size == 0
best_metric = None
best_model_state = self.global_model().fl_get_module().state_dict()
users_per_round = min(self.cfg.users_per_round, num_total_users)
self.data_provider = data_provider
num_rounds_in_epoch = self.rounds_in_one_epoch(num_total_users, users_per_round)
num_users_on_worker = data_provider.num_train_users()
self.logger.info(
f"num_users_on_worker: {num_users_on_worker}, "
f"users_per_round: {users_per_round}, "
f"num_total_users: {num_total_users}"
)
# torch.multinomial requires int instead of float, cast it as int
users_per_round_on_worker = int(users_per_round / distributed_world_size)
self.logger.debug("Validating users per round...")
self._validate_users_per_round(users_per_round_on_worker, num_users_on_worker)
self.logger.debug("Users validated...")
mock_client_indices, batches_per_client, samples_per_batch = self._select_mock_client_indices(num_users_on_worker, users_per_round)
mock_clients = self._select_mock_clients(mock_client_indices)
self.logger.debug(f"Mock clients selected {[client.name for client in mock_clients]}")
canary_designer, canary_analyser = self._setup_canary_designer(checkpoint=checkpoint)
self.canary_analyser = canary_analyser
# main training loop
num_int_epochs = math.ceil(self.cfg.epochs)
# starting_epoch = checkpoint["epoch"] if checkpoint else 1
# self.logger.info(f"Starting from epoch={starting_epoch}")
starting_epoch = 1
for epoch in tqdm(
range(starting_epoch, num_int_epochs + 1), desc="Epoch", unit="epoch", position=0
):
for r in tqdm(
range(1, num_rounds_in_epoch + 1),
desc="Round",
unit="round",
position=0,
):
display_gpu_mem(self.global_model().device, prefix=f"FLSim e={epoch}, r={r}", logger=self.logger)
timeline = Timeline(
epoch=epoch, round=r, rounds_per_epoch=num_rounds_in_epoch
)
t = time()
freeze_model = True
while freeze_model and not self.stop_fl_training(epoch=epoch, round=r, num_rounds_in_epoch=num_rounds_in_epoch):
freeze_model = self._is_freeze_round(epoch, r, num_rounds_in_epoch)
clients = self._client_selection(
num_users_on_worker,
users_per_round_on_worker,
data_provider,
self.global_model(),
epoch,
mock_client_indices
)
if epoch == starting_epoch and r==1 and self.canary_analyser.num_tests == 0: # Initialise once
self._initialise_privacy_budget(checkpoint, num_rounds_in_epoch, num_int_epochs)
display_gpu_mem(device=self.global_model().device, prefix=f"After client selection e={epoch}, r={r}", logger=self.logger)
self.logger.info(f"\n ====== Epoch={epoch}, Round r={r}, model frozen={freeze_model}, frozen round={self.canary_analyser.num_tests+1} ====== ")
self.logger.info(f"Client Selection took: {time() - t} s.")
self.logger.debug(f"Selected clients: {[client.name for client in clients]}")
self.logger.debug(f"Model Hash: {sum([p.flatten().sum() for p in self.global_model().fl_get_module().parameters()]).item()}")
self.logger.info(f"Total number of canary tests - {self.canary_analyser.num_tests}")
self._output_design_rounds(epoch, r, num_rounds_in_epoch)
# Check whether to end training and checkpoint model
if self.cfg.checkpoint_only and self._is_canary_design_round(epoch=epoch, round=r, num_rounds_in_epoch=num_rounds_in_epoch):
self._checkpoint_model(epoch, r, final=True, mock_client_indices=mock_client_indices)
self.logger.info("Model has reached final checkpoint, checkpoint saved, exiting FLSim...")
return self.global_model(), best_metric
# Canary design round
if self._is_canary_design_round(epoch, r, num_rounds_in_epoch): # Generate canary and insert into client
self.logger.info(f"\n ===== Designing canary at epoch={epoch}, round={r} =====")
exact_testing = False
if self.cfg.args.canary_setup == "exact": # For debugging purposes
mock_clients = clients[1:]
exact_testing = True
canary = self._design_canary(canary_designer, mock_clients, exact_testing=exact_testing)
self.canary_analyser.set_canary(canary)
# TODO: Rework this
self.canary_analyser.canary_losses = canary_designer.canary_losses
self.canary_analyser.canary_norms = canary_designer.canary_norms
self.canary_analyser.canary_design_bias = canary_designer.canary_design_bias
self.canary_analyser.benchmark_times = canary_designer.benchmark_times
self.canary_analyser.actual_minibatch_size = canary_designer.canary_design_minibatch_size
self.canary_analyser.actual_pool_size = canary_designer.canary_design_pool_size
self.canary_analyser.actual_sample_size = canary_designer.canary_design_sample_size
if self._is_canary_insert_round(epoch, r, num_rounds_in_epoch):
type = "init" if self._is_init_canary_insert_round(epoch, r, num_rounds_in_epoch) else "canary"
self.logger.debug(f"Inserting canary client, epoch={epoch}, round r={r}, type={type}")
self.logger.info(f"Clients {[client.name for client in clients]}")
target_client, replaced_data = self._push_canary_to_client(clients, canary, target_batch_idx=self.cfg.args.canary_insert_batch_index, insert_as_batch=self.cfg.args.insert_canary_as_batch, type=type)
self.count_canaries(clients, canary)
agg_metric_clients = self._choose_clients_for_post_aggregation_metrics(
train_clients=clients,
num_total_users=num_users_on_worker,
users_per_round=users_per_round_on_worker,
)
# training on selected clients for the round
self.logger.info(f"# clients/round on worker {rank}: {len(clients)}.")
aggregated_model = self._train_one_round(
timeline=timeline,
clients=clients,
agg_metric_clients=agg_metric_clients,
users_per_round=users_per_round,
metrics_reporter=metrics_reporter
if self.cfg.report_train_metrics
else None,
freeze_model=freeze_model,
)
# Testing for canary
if self._is_canary_test_round(epoch, r, num_rounds_in_epoch):
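                        # Tests alternate: even-numbered tests aggregate a round that contained the
                        # canary client ("with"), odd-numbered tests a round without it ("without").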
type = "with" if (self.canary_analyser.num_tests % 2 == 0) else "without"
type = "with_init" if self._is_init_canary_insert_round(epoch, r, num_rounds_in_epoch) else type
self.logger.info(f"==== Testing aggregated server update for canary presence... type={type} ====")
num_clients = len(clients)
self.canary_analyser.test_against_agg_grad(canary, aggregated_model, self.cfg.client.optimizer.lr, num_clients, clip_factor=1, type=type)
self.logger.debug(f"Current dot prods {self.canary_analyser.canary_dot_prods, len(self.canary_analyser.canary_dot_prods['with_canary']), len(self.canary_analyser.canary_dot_prods['without_canary'])}")
if hasattr(self.server, "_clip_factors"):
self.canary_analyser.add_clip_rate((len(self.server._clip_factors)-(np.array(self.server._clip_factors) == 1).sum()) / len(clients))
self.logger.info(f"Clip rate of client updates: {self.canary_analyser.clip_rates[-1]}")
if hasattr(self.server, "_clip_factors"):
self.server._clip_factors = [] # Reset
# report training success rate and training time variance
if rank == 0:
if (
self._timeout_simulator.sample_mean_per_user != 0
or self._timeout_simulator.sample_var_per_user != 0
):
                            self.logger.info(
                                f"mean training time/user: "
                                f"{self._timeout_simulator.sample_mean_per_user}, "
                                f"variance of training time/user: "
                                f"{self._timeout_simulator.sample_var_per_user}"
                            )
t = time()
(eval_metric, best_metric, best_model_state,) = self._maybe_run_evaluation(
timeline=timeline,
data_provider=data_provider,
metrics_reporter=metrics_reporter,
best_metric=best_metric,
best_model_state=best_model_state,
)
if eval_metric:
self.accuracy_metrics["eval"].append(eval_metric["Accuracy"])
self.logger.info(f"Evaluation took {time() - t} s. \n")
if self._analyse_attack(epoch, r, num_rounds_in_epoch):
if not self.cfg.args.skip_acc:
test_metrics = self.test(data_provider=data_provider, metrics_reporter=metrics_reporter)
self.accuracy_metrics["test"].append(test_metrics["Accuracy"])
self.logger.info("Analysing canary tests...")
self.canary_analyser.set_accuracy_metrics(self.accuracy_metrics)
disable_metrics = (self.cfg.args.canary_test_type == "continuous" or self.cfg.args.canary_test_type == "train_and_freeze")
priv_budget = {"epsilon": self.server.privacy_budget.epsilon, "delta": self.server.privacy_budget.delta, "sigma": self.server._privacy_engine.noise_multiplier}
self.logger.info(f"Final priv budget {priv_budget}")
self.canary_analyser.analyse(initial_privacy_budget=self.initial_privacy_budget, final_privacy_budget=priv_budget, one_step_budget=self.one_step_budget, global_round=self._get_global_round_num(epoch, r, num_rounds_in_epoch), disable_bias_metrics=disable_metrics, disable_init_metrics=(disable_metrics or self.cfg.args.canary_setup == "exact"), plot_losses=(not disable_metrics), plot_hists=(not disable_metrics))
freeze_model = False
                    self._extend_canary_attack(epoch, r, num_rounds_in_epoch) # Decide whether to extend the canary attack (i.e., when running train_and_freeze or continuous attacks)
if self.cfg.args.canary_test_type == "train_and_freeze" and self._analyse_attack(epoch, r, num_rounds_in_epoch):
self.canary_analyser.reset() # Reset analyser between attack periods
if self._is_canary_insert_round(epoch, r, num_rounds_in_epoch):
self._cleanup_post_canary_test(clients, target_client, replaced_data) # Removes inserted canary from target client + disables canary testing flag
if self.stop_fl_training(epoch=epoch, round=r, num_rounds_in_epoch=num_rounds_in_epoch):
break
self._report_post_epoch_client_metrics(timeline, metrics_reporter)
if self.stop_fl_training(epoch=epoch, round=r, num_rounds_in_epoch=num_rounds_in_epoch):
break
if rank == 0 and best_metric is not None:
self._save_model_and_metrics(self.global_model(), best_model_state)
return self.global_model(), best_metric
def _output_design_rounds(self, epoch, round, num_rounds_in_epoch):
current_round = self._get_global_round_num(epoch, round, num_rounds_in_epoch)
self.logger.info(f"Current global round {current_round}")
self.logger.info(f"Design round {self._get_design_round(current_round, num_rounds_in_epoch)}")
self.logger.debug(f"Is canary design round? {self._is_canary_design_round(epoch, round, num_rounds_in_epoch)}")
self.logger.debug(f"Is canary insert round? {self._is_canary_insert_round(epoch, round, num_rounds_in_epoch)}")
self.logger.debug(f"Is canary insert init round? {self._is_init_canary_insert_round(epoch, round, num_rounds_in_epoch)}")
self.logger.debug(f"Is canary test round? {self._is_canary_test_round(epoch, round, num_rounds_in_epoch)}")
self.logger.debug(f"Stop testing ? {self._stop_canary_testing(epoch, round, num_rounds_in_epoch)}")
# Canary conditions
def _is_freeze_round(self, epoch, current_round, num_rounds_in_epoch):
return self._is_canary_test_round(epoch, current_round, num_rounds_in_epoch) and (not self.cfg.args.canary_test_type == "continuous") # Never freeze model when doing continuous testing
def _extend_canary_attack(self, epoch, current_round, num_rounds_in_epoch):
current_global_round = self._get_global_round_num(epoch, current_round, num_rounds_in_epoch)
if self.cfg.args.canary_insert_global_round != -1 and self.cfg.args.canary_test_type == "train_and_freeze" and self.canary_analyser.num_tests >= self.cfg.args.canary_num_test_batches:
self.cfg.args.canary_insert_global_round = current_global_round + self.cfg.args.canary_insert_offset
if self.cfg.args.canary_test_type == "continuous" and current_global_round+1 > self.cfg.args.canary_insert_global_round:
self.cfg.args.canary_insert_global_round = current_global_round + self.cfg.args.canary_insert_offset
def _get_design_round(self, current_global_round, num_rounds_in_epoch):
if self.cfg.args.canary_insert_epsilon != -1 and current_global_round > 1: # Second condition is needed as server.privacy_budget.epsilon is inf to begin with
raise NotImplementedError("Canary insert epsilon not supported yet")
# if self.server.privacy_budget.epsilon >= self.cfg.args.canary_insert_epsilon:
# self.cfg.args.canary_insert_global_round = current_global_round
# else:
# self.cfg.args.canary_insert_global_round = float("inf")
if (self.cfg.args.canary_insert_train_acc != -1 or self.cfg.args.canary_insert_test_acc != -1) and not self.insert_acc_achieved:
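            # Once the chosen train/eval accuracy threshold is first reached, schedule the canary
            # for the next global round (and mark insert_acc_achieved); until then the insert
            # round is pushed to infinity so no design/insert/test rounds trigger.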
acc_threshold = self.cfg.args.canary_insert_test_acc if self.cfg.args.canary_insert_test_acc != -1 else self.cfg.args.canary_insert_train_acc
acc_list = self.accuracy_metrics["eval"] if self.cfg.args.canary_insert_test_acc != -1 else self.accuracy_metrics["train"]
if len(acc_list) > 0 and acc_list[-1] >= acc_threshold:
self.cfg.args.canary_insert_global_round = current_global_round+1
self.cfg.args.canary_insert_epoch = math.ceil((current_global_round+1)/num_rounds_in_epoch)
self.insert_acc_achieved = True
else:
self.cfg.args.canary_insert_global_round = float("inf")
if self.cfg.args.canary_insert_global_round != -1:
return self.cfg.args.canary_insert_global_round
else:
return self._get_global_round_num(self.cfg.args.canary_insert_epoch, 1, num_rounds_in_epoch)
def _is_canary_design_round(self, epoch, round, num_rounds_in_epoch):
current_round = self._get_global_round_num(epoch, round, num_rounds_in_epoch)
design_round = self._get_design_round(current_round, num_rounds_in_epoch)
if self.cfg.args.canary_setup == "exact":
return current_round >= design_round
else:
return current_round == design_round and (self.canary_analyser.num_tests == 0 or self.cfg.args.canary_test_type == 'continuous')
def _is_canary_insert_round(self, epoch, round, num_rounds_in_epoch):
current_round = self._get_global_round_num(epoch, round, num_rounds_in_epoch)
design_round = self._get_design_round(current_round, num_rounds_in_epoch)
return current_round >= design_round and (self.canary_analyser.num_tests % 2) == 0
def _is_init_canary_insert_round(self, epoch, round, num_rounds_in_epoch):
if self.cfg.args.canary_test_type == "continuous" or self.cfg.args.canary_test_type == "train_and_freeze" or self.cfg.args.canary_setup == "exact":
return False
else:
return self.canary_analyser.num_tests >= self.cfg.args.canary_num_test_batches
def _is_canary_test_round(self, epoch, round, num_rounds_in_epoch):
current_round = self._get_global_round_num(epoch, round, num_rounds_in_epoch)
design_round = self._get_design_round(current_round, num_rounds_in_epoch)
return current_round == design_round
def _analyse_attack(self, epoch, round, num_rounds_in_epoch):
return (self.cfg.args.canary_test_type == "continuous" and self.canary_analyser.num_tests >= 2 and self._is_canary_test_round(epoch, round, num_rounds_in_epoch)) \
or (self.cfg.args.canary_test_type == "train_and_freeze" and self.canary_analyser.num_tests >= self.cfg.args.canary_num_test_batches) \
or self._stop_canary_testing(epoch, round, num_rounds_in_epoch)
def _stop_canary_testing(self, epoch, round, num_rounds_in_epoch):
round_factor = 1 if self.cfg.args.canary_setup == "exact" else 1.5
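        # e.g. with canary_num_test_batches = 20 in the default (non-exact) setup, testing stops
        # after 1.5 * 20 = 30 tests: 20 alternating with/without tests plus 10 init-canary tests
        # (see _is_init_canary_insert_round).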
if self.cfg.args.canary_test_type == "continuous" or self.cfg.args.canary_test_type == "train_and_freeze": # Train until total number of epochs reached
# return current_round >= self.cfg.args.canary_num_test_batches*self.cfg.args.canary_insert_offset # For debugging
return False
else:
return self.canary_analyser.num_tests >= self.cfg.args.canary_num_test_batches*round_factor # current round + 1 > design round
def stop_fl_training(self, *, epoch, round, num_rounds_in_epoch) -> bool:
"""Stops FL training when the necessary number of steps/epochs have been
completed in case of fractional epochs or if clients time out.
"""
global_round_num = (epoch - 1) * num_rounds_in_epoch + round
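        # e.g. with cfg.epochs = 2.5 and 10 rounds per epoch, training stops once
        # global_round_num / 10 >= 2.5, i.e. after global round 25.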
return (
(global_round_num / num_rounds_in_epoch)
>= self.cfg.epochs # pyre-fixme[16]: `SyncTrainer` has no attribute `cfg`.
or self._timeout_simulator.stop_fl()
or self._stop_canary_testing(epoch, round, num_rounds_in_epoch)
)
def _train_one_round(
self,
timeline: Timeline,
clients: Iterable[Client],
agg_metric_clients: Iterable[Client],
users_per_round: int,
metrics_reporter: Optional[IFLMetricsReporter],
freeze_model: bool,
) -> None:
"""Trains the global model for one training round.
Args:
timeline: information about the round, epoch, round number, ...
clients: clients for the round
agg_metric_clients: clients for evaluating the post-aggregation
training metrics
users_per_round: the number of participating users
metrics_reporter: the metric reporter to pass to other methods
"""
t = time()
self.server.init_round()
self.logger.info(f"Round initialization took {time() - t} s.")
def update(client):
client_delta, weight = client.generate_local_update(
self.global_model(), metrics_reporter
)
self.server.receive_update_from_client(Message(client_delta, weight), enable_clipping= not (freeze_model and self.cfg.args.canary_design_reverse_server_clip))
if self.is_user_level_dp and hasattr(self.server, "_clip_factors"): # If user-level DP
self.logger.debug(f"Server recieved update from {client.name}, clipping factor {(self.server._clip_factors or [None])[-1]}\n")
t = time()
for client in clients:
if freeze_model: client.enable_canary_testing()
update(client)
self.logger.info(f"Collecting round's clients took {time() - t} s.")
t = time()
aggregated_model = None
if freeze_model:
aggregated_model = self.server._aggregator.aggregate()
self.server._privacy_engine.steps -= 1 # Freezing the model, fake step
self.logger.info(f"Noise sensitivity {self.server._clipping_value} / {self.server._aggregator.sum_weights.item()}")
self.server._privacy_engine.add_noise(
aggregated_model,
self.server._clipping_value / self.server._aggregator.sum_weights.item(),
)
self.server._privacy_engine.get_privacy_spent()
else:
aggregated_model = self.server.step()
self.logger.info(f"Finalizing round took {time() - t} s.")
t = time()
train_metrics = self._report_train_metrics(
model=self.global_model(),
timeline=timeline,
metrics_reporter=metrics_reporter,
)
if train_metrics:
self.accuracy_metrics["train"].append(train_metrics["Accuracy"])
eval_metrics = self._evaluate_global_model_after_aggregation_on_train_clients(
clients=agg_metric_clients,
model=self.global_model(),
timeline=timeline,
users_per_round=users_per_round,
metrics_reporter=metrics_reporter,
)
if eval_metrics:
self.accuracy_metrics["agg"].append(eval_metrics["Accuracy"])
self._calc_post_epoch_communication_metrics(
timeline,
metrics_reporter,
)
self.logger.info(f"Aggregate round reporting took {time() - t} s.\n")
return aggregated_model
def _choose_clients_for_post_aggregation_metrics(
self,
train_clients: Iterable[Client],
num_total_users: int,
users_per_round: int,
) -> Iterable[Client]:
"""Chooses clients for the post-aggregation training metrics.
Depending on the config parameters, either returns the round's
training clients or new randomly drawn clients.
"""
# pyre-fixme[16]: `SyncTrainer` has no attribute `cfg`.
if self.cfg.use_train_clients_for_aggregation_metrics:
return train_clients
# For the post-aggregation metrics, evaluate on new users
agg_metric_client_idcs = torch.multinomial(
torch.ones(num_total_users, dtype=torch.float),
users_per_round,
replacement=False,
).tolist()
agg_metric_clients = [
self.create_or_get_client_for_data(i, self.data_provider)
for i in agg_metric_client_idcs
] # noqa
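        # Note: the freshly drawn agg_metric_clients above are currently unused; the round's
        # training clients are returned for post-aggregation metrics regardless.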
return train_clients
def _evaluate_global_model_after_aggregation_on_train_clients(
self,
clients: Iterable[Client],
model: IFLModel,
timeline: Timeline,
users_per_round: int,
metrics_reporter: Optional[IFLMetricsReporter] = None,
):
if (
metrics_reporter is not None
# pyre-fixme[16]: `SyncTrainer` has no attribute `cfg`.
and self.cfg.report_train_metrics
and self.cfg.report_train_metrics_after_aggregation
and timeline.tick(1.0 / self.cfg.train_metrics_reported_per_epoch)
):
with torch.no_grad():
self._cuda_state_manager.before_train_or_eval(model)
model.fl_get_module().eval()
for client in clients:
for batch in client.dataset.train_data():
batch_metrics = model.get_eval_metrics(batch)
if metrics_reporter is not None:
metrics_reporter.add_batch_metrics(batch_metrics)
model.fl_get_module().train()
privacy_metrics = self._calc_privacy_metrics(
clients, model, metrics_reporter
)
overflow_metrics = self._calc_overflow_metrics(
clients, model, users_per_round, metrics_reporter
)
eval_metrics, best_replaced = metrics_reporter.report_metrics(
model=model,
reset=True,
stage=TrainingStage.AGGREGATION,
timeline=timeline,
epoch=timeline.global_round_num(), # for legacy
print_to_channels=True,
extra_metrics=privacy_metrics + overflow_metrics,
)
self._cuda_state_manager.after_train_or_eval(model)
return eval_metrics
def _report_post_epoch_client_metrics(
self,
timeline: Timeline,
metrics_reporter: Optional[IFLMetricsReporter],
):
if (
metrics_reporter is not None
# pyre-fixme[16]: `SyncTrainer` has no attribute `cfg`.
and self.cfg.report_client_metrics
and self.cfg.report_client_metrics_after_epoch
and (timeline.epoch % self.cfg.client_metrics_reported_per_epoch == 0)
):
client_models = {
client: client.last_updated_model for client in self.clients.values()
}
client_scores = self._calc_post_epoch_client_metrics(
client_models, timeline, metrics_reporter
)
# Find stats over the client_metrics (mean, min, max, median, std)
client_stats_trackers = {}
score_names = [metric.name for metric in next(iter(client_scores))]
for score_name in score_names:
client_stats_trackers[score_name] = RandomVariableStatsTracker(
tracks_quantiles=True
)
for client_metric_list in client_scores:
for client_metric in client_metric_list:
client_stats_trackers[client_metric.name].update(
client_metric.value
)
reportable_client_metrics = []
for score_name in score_names:
for stat_name, stat_key in [
("Mean", "mean_val"),
("Median", "median_val"),
("Upper Quartile", "upper_quartile_val"),
("Lower Quartile", "lower_quartile_val"),
("Min", "min_val"),
("Max", "max_val"),
("Standard Deviation", "standard_deviation_val"),
("Num Samples", "num_samples"),
]:
score = client_stats_trackers[score_name].__getattribute__(stat_key)
reportable_client_metrics.append(Metric(stat_name, score))
metrics_reporter.report_metrics(
model=None,
reset=True,
stage=TrainingStage.PER_CLIENT_EVAL,
timeline=timeline,
epoch=timeline.global_round_num(), # for legacy
print_to_channels=True,
extra_metrics=reportable_client_metrics,
)
@dataclass
class CanarySyncTrainerConfig(FLTrainerConfig):
_target_: str = fullclassname(CanarySyncTrainer)
server: SyncServerConfig = SyncServerConfig()
users_per_round: int = 10
# overselect users_per_round / dropout_rate users, only use first
# users_per_round updates
dropout_rate: float = 1.0
report_train_metrics_after_aggregation: bool = False
report_client_metrics_after_epoch: bool = False
# Whether client metrics on eval data should be computed and reported.
report_client_metrics: bool = False
# how many times per epoch should we report client metrics
# numbers greater than 1 help with plotting more precise training curves
client_metrics_reported_per_epoch: int = 1
plot_path: str = "./"
result_path: str = "./test.tar"
checkpoint_round: float = 0.1
save_checkpoint: bool = False
checkpoint_only: bool = True
load_checkpoint: bool = True
args: Optional[Dict[str, Any]] = None | canife-main | FLSim/flsim/trainers/canary_sync_trainer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore
from .async_trainer import AsyncTrainerConfig
from .canary_sync_trainer import CanarySyncTrainerConfig
from .private_sync_trainer import PrivateSyncTrainerConfig
from .sync_trainer import SyncTrainerConfig
ConfigStore.instance().store(
name="base_canary_sync_trainer",
node=CanarySyncTrainerConfig,
group="trainer",
)
ConfigStore.instance().store(
name="base_sync_trainer",
node=SyncTrainerConfig,
group="trainer",
)
ConfigStore.instance().store(
name="base_async_trainer",
node=AsyncTrainerConfig,
group="trainer",
)
ConfigStore.instance().store(
name="base_private_sync_trainer",
node=PrivateSyncTrainerConfig,
group="trainer",
)
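# Usage sketch (hypothetical composition; the exact selection syntax depends on how the
# experiment configs compose these nodes): with the trainer nodes registered above, a Hydra
# config can pick one by group/name, e.g. a defaults list entry such as
#     defaults:
#       - trainer: base_canary_sync_trainer
# and then override individual fields like trainer.epochs or trainer.users_per_round.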
| canife-main | FLSim/flsim/trainers/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import logging
import sys
from dataclasses import dataclass
from typing import Any, List, Optional, Tuple
import torch
from flsim.channels.base_channel import FLChannelConfig
from flsim.channels.communication_stats import ChannelDirection
from flsim.clients.base_client import ClientConfig
from flsim.common.fine_tuner import FineTuner
from flsim.common.logger import Logger
from flsim.common.timeline import Timeline
from flsim.common.timeout_simulator import (
NeverTimeOutSimulatorConfig,
TimeOutSimulatorConfig,
)
from flsim.data.data_provider import IFLDataProvider
from flsim.interfaces.metrics_reporter import IFLMetricsReporter, Metric, TrainingStage
from flsim.interfaces.model import IFLModel
from flsim.utils.config_utils import init_self_cfg
from flsim.utils.cuda import CudaTransferMinimizer
from hydra.utils import instantiate
from omegaconf import MISSING, OmegaConf
class FLTrainer(abc.ABC):
"""Base class for Federated Learning Training"""
logger: logging.Logger = Logger.get_logger(__name__)
def __init__(
self,
*,
model: IFLModel,
cuda_enabled: bool = False,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=FLTrainerConfig,
**kwargs,
)
        assert (
            # pyre-fixme[16]: `FLTrainer` has no attribute `cfg`.
            self.cfg.eval_epoch_frequency
            <= self.cfg.epochs
        ), (
            "We expect to do at least one eval. However, eval_epoch_frequency:"
            f" {self.cfg.eval_epoch_frequency} > total epochs: {self.cfg.epochs}"
        )
# self._cuda_state_manager = GPUMemoryMinimizer(cuda_enabled)
self._cuda_state_manager = CudaTransferMinimizer(cuda_enabled)
self._cuda_state_manager.on_trainer_init(model)
self.cuda_enabled = cuda_enabled
self._timeout_simulator = instantiate(self.cfg.timeout_simulator)
self.channel = instantiate(self.cfg.channel)
self.data_provider = None
self.num_total_users: int = -1
# Initialize tracker for measuring communication between the clients and
# the server if communication metrics are enabled
self.clients = {}
self.eval_clients = {}
@classmethod
def _set_defaults_in_cfg(cls, cfg):
if OmegaConf.is_missing(cfg.timeout_simulator, "_target_"):
cfg.timeout_simulator = NeverTimeOutSimulatorConfig()
if OmegaConf.is_missing(cfg.client, "_target_"):
cfg.client = ClientConfig()
if OmegaConf.is_missing(cfg.channel, "_target_"):
cfg.channel = FLChannelConfig()
@abc.abstractmethod
def train(
self,
data_provider: IFLDataProvider,
metrics_reporter: IFLMetricsReporter,
num_total_users: int,
distributed_world_size: int = 1,
rank: int = 0,
) -> Tuple[IFLModel, Any]:
pass
def test(
self, data_provider: IFLDataProvider, metrics_reporter: IFLMetricsReporter
) -> Any:
return self._test(
timeline=Timeline(global_round=1),
data_provider=data_provider,
model=self.global_model(),
metrics_reporter=metrics_reporter,
)
def global_model(self) -> IFLModel:
pass
def _maybe_run_evaluation(
self,
timeline: Timeline,
data_provider,
metrics_reporter: IFLMetricsReporter,
best_metric,
best_model_state,
):
# pyre-fixme[16]: `FLTrainer` has no attribute `cfg`.
if not self.cfg.do_eval:
return None, best_metric, best_model_state # TODO: Canary modification
if not timeline.tick(self.cfg.eval_epoch_frequency):
return None, best_metric, best_model_state # TODO: Canary modification
personalized_metrics = {}
if self.cfg.personalized:
# personalized eval on eval users
# TODO: Fix metrics reporting in order to report personalized metrics
personalized_metrics = self._evaluate_personalized_eval_users( # noqa
timeline=timeline,
data_provider=data_provider,
global_model=self.global_model(),
metrics_reporter=metrics_reporter,
)
# evaluate global model on eval users
eval_metric, eval_metric_better_than_prev = self._evaluate(
timeline=timeline,
data_provider=data_provider,
global_model=self.global_model(),
metrics_reporter=metrics_reporter,
)
# 1) keep the best model so far if metrics_reporter.compare_metrics is specified
# 2) if self.always_keep_trained_model is set as true, ignore the metrics and
# keep the trained model for each epoch
if self.cfg.always_keep_trained_model or eval_metric_better_than_prev:
best_metric = eval_metric
model_state = self.global_model().fl_get_module().state_dict()
best_model_state = model_state
sys.stdout.flush()
return eval_metric, best_metric, best_model_state # TODO: Canary modification
def _print_training_stats(self, timeline: Timeline) -> None:
print(f"Train finished Global Round: {timeline.global_round_num()}")
def _report_train_metrics(
self,
model: IFLModel,
timeline: Timeline,
metrics_reporter: Optional[IFLMetricsReporter] = None,
extra_metrics: Optional[List[Metric]] = None,
    ) -> Any:
train_metrics = None
if (
# pyre-fixme[16]: `FLTrainer` has no attribute `cfg`.
self.cfg.report_train_metrics
and metrics_reporter is not None
and timeline.tick(1.0 / self.cfg.train_metrics_reported_per_epoch)
):
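            # timeline.tick(1 / N) fires roughly N times per epoch, so with
            # train_metrics_reported_per_epoch = 2 training metrics are reported about twice per epoch.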
self._print_training_stats(timeline)
train_metrics, is_best_updated = metrics_reporter.report_metrics(
model=model,
reset=True,
stage=TrainingStage.TRAINING,
timeline=timeline,
epoch=timeline.global_round_num(), # for legacy
print_to_channels=True,
extra_metrics=extra_metrics,
)
return train_metrics
def _calc_post_epoch_communication_metrics(
self, timeline: Timeline, metrics_reporter: Optional[IFLMetricsReporter]
):
if (
metrics_reporter is not None
and self.channel.cfg.report_communication_metrics
# pyre-fixme[16]: `FLTrainer` has no attribute `cfg`
and timeline.tick(1.0 / self.cfg.train_metrics_reported_per_epoch)
):
extra_metrics = [
Metric(
"Client to Server Bytes Sent"
if name == ChannelDirection.CLIENT_TO_SERVER
else "Server to Client Bytes Sent",
tracker.mean(),
)
for name, tracker in self.channel.stats_collector.get_channel_stats().items()
]
metrics_reporter.report_metrics(
model=None,
reset=False,
stage=TrainingStage.TRAINING,
timeline=timeline,
epoch=timeline.global_round_num(), # for legacy
print_to_channels=True,
extra_metrics=extra_metrics,
)
self.channel.stats_collector.reset_channel_stats()
def _evaluate_personalized_eval_users(
self,
timeline: Timeline,
data_provider,
global_model: IFLModel, # a global model
metrics_reporter: IFLMetricsReporter,
) -> Any:
"""Finetunes global model for each eval user on their train data
and then evaluates performance on local eval data
"""
print(
f"{timeline}: \t Evaluate global model w/ finetune on validation data of eval users"
)
personalized_metrics, _ = FineTuner.fine_tune_and_evaluate(
data=self.data_provider.eval_users(),
global_model=global_model,
# pyre-ignore[16]
client_config=self.cfg.client,
metrics_reporter=metrics_reporter,
cuda_state_manager=self._cuda_state_manager,
training_stage=TrainingStage.PERSONALIZED_EVAL,
timeline=timeline,
epochs=self.cfg.personalized_epochs,
)
return personalized_metrics
def _evaluate(
self,
timeline: Timeline,
data_provider: IFLDataProvider,
global_model: IFLModel,
metrics_reporter: IFLMetricsReporter,
) -> Tuple[Any, bool]:
"""
Evaluate global model on eval users
"""
with torch.no_grad():
self._cuda_state_manager.before_train_or_eval(global_model)
global_model.fl_get_module().eval()
# print(f"{timeline}: \t Evaluates global model on all data of eval users")
for user in data_provider.eval_users():
for batch in user.eval_data():
batch_metrics = global_model.get_eval_metrics(batch)
metrics_reporter.add_batch_metrics(batch_metrics)
metrics, found_best_model = metrics_reporter.report_metrics(
model=global_model,
reset=True,
stage=TrainingStage.EVAL,
timeline=timeline,
epoch=timeline.global_round_num(), # for legacy
print_to_channels=True,
)
self._cuda_state_manager.after_train_or_eval(global_model)
return metrics, found_best_model
def _test(
self,
timeline: Timeline,
data_provider: IFLDataProvider,
model: IFLModel,
metrics_reporter: IFLMetricsReporter,
) -> Any:
personalized_metrics = {}
# pyre-ignore[16]
if self.cfg.personalized:
personalized_metrics, _ = FineTuner.fine_tune_and_evaluate( # noqa
data=self.data_provider.test_users(),
global_model=model,
client_config=self.cfg.client,
metrics_reporter=metrics_reporter,
cuda_state_manager=self._cuda_state_manager,
training_stage=TrainingStage.PERSONALIZED_TEST,
timeline=timeline,
epochs=self.cfg.personalized_epochs,
)
with torch.no_grad():
self._cuda_state_manager.before_train_or_eval(model)
model.fl_get_module().eval()
print(f"Running {timeline} for {TrainingStage.TEST.name.title()}")
for test_user in data_provider.test_users():
for batch in test_user.eval_data():
batch_metrics = model.get_eval_metrics(batch)
metrics_reporter.add_batch_metrics(batch_metrics)
metrics, _ = metrics_reporter.report_metrics(
model=model,
reset=True,
stage=TrainingStage.TEST,
timeline=timeline,
epoch=timeline.global_round_num(), # for legacy
print_to_channels=True,
)
self._cuda_state_manager.after_train_or_eval(model)
return metrics
@dataclass
class FLTrainerConfig:
_target_: str = MISSING
_recursive_: bool = False
# Training epochs
epochs: float = 1000.0
# Whether to do evaluation and model selection based on it.
do_eval: bool = True
# don't use metric reporter to choose: always keep trained model
always_keep_trained_model: bool = False
# client training timeout
timeout_simulator: TimeOutSimulatorConfig = TimeOutSimulatorConfig()
# how many times per epoch should we report training metrics
# numbers greater than 1 help with plotting more precise training curves
train_metrics_reported_per_epoch: int = 1
# perform eval to do model selection in every eval_epoch_frequency epochs
eval_epoch_frequency: float = 1.0
# Whether metrics on training data should be computed and reported.
report_train_metrics: bool = True
report_train_metrics_after_aggregation: bool = False
use_train_clients_for_aggregation_metrics: bool = True
# config for the clients
client: ClientConfig = ClientConfig()
# config for the channels
channel: FLChannelConfig = FLChannelConfig()
personalized: bool = False # flag to personalized global model by locally fine tuning before evaluation
personalized_epochs: int = 1 # number of fine tune epochs to run
| canife-main | FLSim/flsim/trainers/trainer_base.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Iterable, List, Optional, Tuple
import torch
from flsim.clients.base_client import Client
from flsim.clients.dp_client import DPClient, DPClientConfig
from flsim.data.data_provider import IFLDataProvider
from flsim.interfaces.metrics_reporter import IFLMetricsReporter, Metric
from flsim.interfaces.model import IFLModel
from flsim.reducers.dp_round_reducer import DPRoundReducerConfig
from flsim.trainers.sync_trainer import SyncTrainer, SyncTrainerConfig
from flsim.utils.config_utils import fullclassname, init_self_cfg
from hydra.utils import instantiate
from omegaconf import OmegaConf
class PrivateSyncTrainer(SyncTrainer):
r"""
A ``SyncTrainer`` that supports both sample level
and user level differential privacy (DP).
"""
def __init__(
self,
*,
model: IFLModel,
cuda_enabled: bool = False,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=PrivateSyncTrainerConfig,
**kwargs,
)
raise ValueError(
"PrivateSyncTrainer is deprecated. Please use SyncTrainer with SyncDPServerConfig for user-level dp or with DPClientConfig for sample-level DP"
)
super().__init__(model=model, cuda_enabled=cuda_enabled, **kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def train(
self,
data_provider: IFLDataProvider,
metrics_reporter: IFLMetricsReporter,
num_total_users: int,
distributed_world_size: int,
rank: int = 0,
) -> Tuple[IFLModel, Any]:
dp_round_reducer = instantiate(
# pyre-fixme[16]: `PrivateSyncTrainer` has no attribute `cfg`.
self.cfg.reducer,
global_model=self.global_model(),
num_users_per_round=self.cfg.users_per_round,
total_number_of_users=num_total_users,
channel=self.channel,
)
# pyre-ignore[16]
self.aggregator.init_round(dp_round_reducer)
return super().train(
data_provider,
metrics_reporter,
num_total_users,
distributed_world_size,
rank,
)
def create_or_get_client_for_data(self, dataset_id: int, datasets: Any):
"""This function is used to create clients in a round. Thus, it
is called UPR * num_rounds times per training run. Here, we use
        `OmegaConf.structured` instead of `hydra.instantiate`
to minimize the overhead of hydra object creation.
"""
self.clients[dataset_id] = DPClient(
# pyre-ignore [16]: `PrivateSyncTrainer` has no attribute `cfg`
**OmegaConf.structured(self.cfg.client),
dataset=datasets[dataset_id],
name=f"client_{dataset_id}",
timeout_simulator=self._timeout_simulator,
store_last_updated_model=self.cfg.report_client_metrics,
channel=self.channel,
)
return self.clients[dataset_id]
def calc_post_aggregation_train_metrics(
self,
clients: Iterable[Client],
model: IFLModel,
metrics_reporter: Optional[IFLMetricsReporter],
) -> List[Metric]:
metrics = super()._calc_privacy_metrics(clients, model, metrics_reporter)
# calculate sample level dp privacy loss statistics.
all_client_eps = torch.Tensor(
[c.privacy_budget.epsilon for c in clients] # pyre-ignore
)
mean_client_eps = all_client_eps.mean()
max_client_eps = all_client_eps.max()
min_client_eps = all_client_eps.min()
p50_client_eps = torch.median(all_client_eps)
sample_dp_metrics = Metric.from_args(
mean=mean_client_eps,
min=min_client_eps,
max=max_client_eps,
median=p50_client_eps,
)
# calculate user level dp privacy loss statistics.
# pyre-ignore[16]
dp_round_reducer = self.aggregator.reducer
aggr_eps = dp_round_reducer.privacy_budget.epsilon
return metrics + [
Metric("sample level dp (eps)", sample_dp_metrics),
Metric("user level dp (eps)", aggr_eps),
]
@dataclass
class PrivateSyncTrainerConfig(SyncTrainerConfig):
_target_: str = fullclassname(PrivateSyncTrainer)
client: DPClientConfig = DPClientConfig() # For sample-level DP
# To disable sample-level DP, set DPClientConfig.clipping_value = float("inf")
reducer: DPRoundReducerConfig = DPRoundReducerConfig() # For user-level DP
# To disable user-level DP, set DPRoundReducerConfig.clipping_value = float("inf")
| canife-main | FLSim/flsim/trainers/private_sync_trainer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List
import numpy as np
import pytest
import torch
from flsim.common.pytest_helper import (
assertAlmostEqual,
assertEqual,
assertLess,
assertListEqual,
assertTrue,
)
from flsim.common.timeline import Timeline
from flsim.common.timeout_simulator import GaussianTimeOutSimulatorConfig
from flsim.interfaces.metrics_reporter import Channel, TrainingStage
from flsim.interfaces.model import IFLModel
from flsim.metrics_reporter.tensorboard_metrics_reporter import FLMetricsReporter
from flsim.optimizers.async_aggregators import (
AsyncAggregatorConfig,
FedAdamAsyncAggregatorConfig,
FedAvgWithLRAsyncAggregatorConfig,
FedAvgWithLRFedBuffAggregatorConfig,
)
from flsim.optimizers.optimizer_scheduler import LRBatchSizeNormalizerSchedulerConfig
from flsim.utils.async_trainer.async_example_weights import (
EqualExampleWeightConfig,
LinearExampleWeightConfig,
)
from flsim.utils.async_trainer.training_event_generator import (
AsyncTrainingEventGeneratorConfig,
AsyncTrainingEventGeneratorFromListConfig,
ConstantAsyncTrainingStartTimeDistrConfig,
EventTimingInfo,
)
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.sample_model import ConstantGradientFLModel, DummyAlphabetFLModel
from flsim.utils.test_utils import (
MetricsReporterWithMockedChannels,
RandomEvalMetricsReporter,
verify_models_equivalent_after_training,
)
from flsim.utils.tests.helpers.test_async_trainer_utils import (
assert_fl_nonfl_same,
assert_fl_nonfl_same_equal_data_split,
async_train_one_user,
create_async_trainer,
create_event_generator_config,
get_data,
get_equal_split_data,
get_fl_data_provider,
get_safe_global_lr,
get_unequal_split_data,
run_fl_training,
run_fl_training_with_event_generator,
)
from flsim.utils.timing.training_duration_distribution import (
PerExampleGaussianDurationDistributionConfig,
PerUserUniformDurationDistributionConfig,
)
class TestMetricsReporter(MetricsReporterWithMockedChannels):
"""Don't reset metrics when reported. Else, at the end of
training, metrics have zeroes
"""
def reset(self) -> None:
pass
class ConcurrencyMetricsReporter(FLMetricsReporter):
ACCURACY = "Accuracy"
def __init__(self, channels: List[Channel]) -> None:
self.concurrency_metrics = []
self.eval_rounds = []
super().__init__(channels)
def compare_metrics(self, eval_metrics, best_metrics):
print(f"Current eval accuracy: {eval_metrics}%, Best so far: {best_metrics}%")
if best_metrics is None:
return True
return eval_metrics > best_metrics
def compute_scores(self) -> Dict[str, Any]:
# compute accuracy
correct = torch.Tensor([0])
for i in range(len(self.predictions_list)):
all_preds = self.predictions_list[i]
pred = all_preds.data.max(1, keepdim=True)[1]
assert pred.device == self.targets_list[i].device, (
f"Pred and targets moved to different devices: "
f"pred >> {pred.device} vs. targets >> {self.targets_list[i].device}"
)
if i == 0:
correct = correct.to(pred.device)
correct += pred.eq(self.targets_list[i].data.view_as(pred)).sum()
# total number of data
total = sum(len(batch_targets) for batch_targets in self.targets_list)
accuracy = 100.0 * correct.item() / total
return {self.ACCURACY: accuracy}
def create_eval_metrics(
self, scores: Dict[str, Any], total_loss: float, **kwargs
) -> Any:
return scores[self.ACCURACY]
def report_metrics(self, reset: bool, stage, extra_metrics=None, **kwargs):
if stage != TrainingStage.EVAL:
assert (
extra_metrics is not None
), "Async Trainer metrics reporting should have extra metrics"
metrics = [m for m in extra_metrics if m.name == "Concurrency_Rate"]
assert (
len(metrics) == 1
), "Concurrency rate should be one of the extra metrics"
concurrency_rate = metrics[0]
self.concurrency_metrics.append(concurrency_rate.value)
else:
timeline: Timeline = kwargs.get("timeline", Timeline(global_round=1))
self.eval_rounds.append(timeline.global_round)
return super().report_metrics(reset, stage, extra_metrics, **kwargs)
class TestAsyncTrainer:
# TODO: add test_one_user_sequential_user_same
# TODO: add test where each user has unequal amount of data, both SGD and adam
def _assert_fl_nonfl_same_unequal_data_split(
self,
fl_batch_size: int,
num_examples: int,
num_fl_users: int,
max_examples_per_user: int,
epochs: int,
local_lr: float,
aggregator_config: AsyncAggregatorConfig,
training_rate: int = 1,
training_duration_mean: int = 0,
training_duration_sd: int = 0,
) -> None:
torch.manual_seed(1)
# create dummy FL model on alphabet
global_model = DummyAlphabetFLModel()
# if aggregator is FedAdam, each user should have only one batch
# otherwise non-FL and FL training won't be the same
one_batch_per_user_only = "FedAdam" in aggregator_config._target_
fl_data_provider, nonfl_data_loader = get_unequal_split_data(
num_examples=num_examples,
num_fl_users=num_fl_users,
max_examples_per_user=max_examples_per_user,
fl_batch_size=fl_batch_size,
model=global_model,
one_batch_per_user_only=one_batch_per_user_only,
)
assertEqual(
assert_fl_nonfl_same(
global_model=global_model,
fl_data_provider=fl_data_provider,
nonfl_data_loader=nonfl_data_loader,
epochs=epochs,
local_lr=local_lr,
aggregator_config=aggregator_config,
training_rate=training_rate,
training_duration_mean=training_duration_mean,
training_duration_sd=training_duration_sd,
),
"",
)
def assert_fl_nonfl_same_one_user_sgd(self) -> None:
"""
Given:
data_for_fl={user1:batchA, batchB, batchC, ...}
data_for_non_fl={batchA, batchB, batchC, ...}
Check that the following produce the same trained model:
1. FL training, global_opt=SGD, local_lr=x, global_lr=x
2. Non-FL training, opt=SGD, lr=x
"""
# try 3 different batch sizes:
# 4=> user has multiple batches
# 32=> user has exactly 1 batch, same as #examples
# 128=> user has exactly 1 batch, but bigger than #examples
num_examples = 32
for fl_batch_size in [4, 32, 128]:
# choose random learning rate between 0 and 10
local_lr = np.random.random_sample() * 10
global_lr = get_safe_global_lr(
fl_batch_size=fl_batch_size, max_examples_per_user=num_examples
)
assertEqual(
assert_fl_nonfl_same_equal_data_split(
num_examples=num_examples,
num_fl_users=1,
fl_batch_size=fl_batch_size,
training_rate=1,
training_duration_mean=0,
training_duration_sd=0,
epochs=5,
local_lr=local_lr,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=global_lr),
),
"",
)
def assert_fl_nonfl_same_multiple_users_sgd(self) -> None:
"""
Given:
data_for_fl={user1:batch1A, batch1B..., user2: batch2A, batch2B,...}
data_for_non_fl={batch1A, batch1B, ..., batch2A, batch2B, ...}
Check that the following produce the same trained model:
1. AsyncFL training, training_rate=1 (sequential), global_opt=SGD, local_lr=x, global_lr=x
2. Non-FL training, opt=SGD, lr=x
"""
num_fl_users = 4
num_examples = 32
# equal data split
# try 3 different batch size
# 4=> each user has multiple batches
# 16=> each user has exactly 1 batch, same as #examples_per_user
# 128=> each user has exactly 1 batch, but bigger than #examples_per_user
equal_split_examples_per_user = num_examples / num_fl_users
for fl_batch_size in [4, 16, 32]:
# random learning rate between 0 and 10
local_lr = np.random.random_sample() * 10
global_lr = get_safe_global_lr(
fl_batch_size=fl_batch_size,
# pyre-fixme[6]: Expected `int` for 2nd param but got `float`.
max_examples_per_user=equal_split_examples_per_user,
)
assertEqual(
assert_fl_nonfl_same_equal_data_split(
num_examples=num_examples,
num_fl_users=num_fl_users,
fl_batch_size=fl_batch_size,
training_rate=1,
training_duration_mean=0,
training_duration_sd=0,
epochs=5,
local_lr=local_lr,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=global_lr),
),
"",
)
# unequal data split
# user1 = 24 examples, user2 = 8 examples
# 4=> each user has multiple batches
# 8=> user1 has multiple batches, user 2 has 1 batch
# cannot try batch size larger than 8
unequal_split_max_examples_per_user = 24
for fl_batch_size in [4, 8]:
# random learning rate between 0 and 10
local_lr = np.random.random_sample() * 10
global_lr = get_safe_global_lr(
fl_batch_size=fl_batch_size,
max_examples_per_user=unequal_split_max_examples_per_user,
)
self._assert_fl_nonfl_same_unequal_data_split(
num_examples=num_examples,
num_fl_users=2,
max_examples_per_user=unequal_split_max_examples_per_user,
fl_batch_size=fl_batch_size,
training_rate=1,
training_duration_mean=0,
training_duration_sd=0,
epochs=5,
local_lr=local_lr,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=global_lr),
)
def assert_fl_nonfl_same_one_user_adam(self) -> None:
"""
Given:
data_for_fl={user1:batchA, batchB, batchC, ...}
data_for_non_fl={batchA, batchB, batchC, ...}
Check that the following produce the same trained model:
1. FL training, global_opt=Adam, local_lr=x, global_lr=x
2. Non-FL training, opt=Adam, lr=x
"""
# for Adam, for FL and non-FL to be equivalent,
# each user must have maximum one batch of data,
# because local optimizer on each user has to be SGD
# 32=> user has exactly 1 batch, same as #examples_per_user
# 128=> user has exactly 1 batch, but bigger than #examples_per_user
for fl_batch_size in [32, 128]:
# random learning rate between 0 and 0.01
global_lr = np.random.random_sample() * 0.01
assertEqual(
assert_fl_nonfl_same_equal_data_split(
num_examples=32,
num_fl_users=1,
fl_batch_size=fl_batch_size,
training_rate=1,
training_duration_mean=0,
training_duration_sd=0,
epochs=5,
local_lr=1.0,
aggregator_config=FedAdamAsyncAggregatorConfig(
lr=global_lr, eps=1e-2
),
),
"",
)
def assert_fl_nonfl_same_multiple_users_adam(self) -> None:
"""
Given:
data_for_fl={user1:batch1A, batch1B..., user2: batch2A, batch2B,...}
data_for_non_fl={batch1A, batch1B, ..., batch2A, batch2B, ...}
Check that the following produce the same trained model:
1. AsyncFL training, training_rate=1 (sequential), global_opt=Adam, local_lr=x, global_lr=x
2. Non-FL training, opt=Adam, lr=x
"""
# for Adam, for FL and non-FL to be equivalent,
# each user must have maximum one batch of data,
# because local optimizer on each user has to be SGD
# 8=> each user has exactly 1 batch, same as #examples_per_user
# 128=> each user has exactly 1 batch, but bigger than #examples_per_user
for fl_batch_size in [8, 128]:
# random learning rate between 0 and 0.001
global_lr = np.random.random_sample() * 0.001
assertEqual(
assert_fl_nonfl_same_equal_data_split(
num_examples=32,
num_fl_users=4,
fl_batch_size=fl_batch_size,
training_rate=1,
training_duration_mean=0,
training_duration_sd=0,
epochs=5,
local_lr=1.0,
aggregator_config=FedAdamAsyncAggregatorConfig(
lr=global_lr, eps=1e-2
),
),
"",
)
def _test_local_lr_normalization(self, num_users: int) -> None:
"""Run training for two tasks:
Assumption: batch_size > data on any user (so all users have incomplete batches)
AsyncFLTask1: data = {user1:data1, user2:data2},
local_lr_normalization = False, weighting scheme = linear, lr = base_lr
AsyncFLTask2: data = {user1:data1, user2:data2},
local_lr_normalization = True, weighting scheme = equal, lr = base_lr
Task1, Step 1:
LR = base_lr
Grad = (grad_user1*base_lr)
Wt = linear = num_ex_user1
Model_new = Model_old - (grad_user1*base_lr) * num_ex_user1
Task2, Step 1:
LR = base_lr * batch_size
Grad = grad_user1 *(num_ex_user_1/batch_size) * (base_lr*batch_size)
Wt = equal = 1
Model_new = Model_old - (grad_user1*num_ex_user1*base_lr)
So both models should take the same step
Verify that the trained models have the same parameters
"""
fl_batch_size = 128
base_lr = 0.1
# local_lr_normalization scales learning rate by num_examples_in_batch
# we upweight the learning rate so that after local_lr_normalization, the
# fl-task1 has the same learning rate as fl-task2
upweighted_lr = base_lr * fl_batch_size
num_total_examples = 32
epochs = 5
# create dummy FL model on alphabet
torch.manual_seed(1)
init_model = DummyAlphabetFLModel()
fl_data_provider, _ = get_equal_split_data(
num_examples=num_total_examples,
num_fl_users=num_users,
fl_batch_size=fl_batch_size,
model=init_model,
)
def get_base_optimizer_and_trained_model():
init_model_local = FLModelParamUtils.clone(init_model)
async_trainer = create_async_trainer(
model=init_model_local,
local_lr=base_lr,
epochs=epochs,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=1.0),
example_weight_config=LinearExampleWeightConfig(),
event_generator_config=create_event_generator_config(
training_rate=1.0, training_duration_mean=0, training_duration_sd=0
),
)
trained_fl_model, _ = async_trainer.train(
data_provider=fl_data_provider,
metrics_reporter=MetricsReporterWithMockedChannels(),
num_total_users=num_users,
distributed_world_size=1,
)
return trained_fl_model
def get_lrnorm_optimizer_and_trained_model():
init_model_local = FLModelParamUtils.clone(init_model)
async_trainer = create_async_trainer(
model=init_model_local,
local_lr=upweighted_lr,
epochs=epochs,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=1.0),
example_weight_config=EqualExampleWeightConfig(),
local_lr_scheduler_config=LRBatchSizeNormalizerSchedulerConfig(
base_lr=upweighted_lr, local_lr_normalizer=fl_batch_size
),
event_generator_config=create_event_generator_config(
training_rate=1.0, training_duration_mean=0, training_duration_sd=0
),
)
trained_fl_model, _ = async_trainer.train(
data_provider=fl_data_provider,
metrics_reporter=MetricsReporterWithMockedChannels(),
num_total_users=num_users,
distributed_world_size=1,
)
return trained_fl_model
base_trained_model = get_base_optimizer_and_trained_model()
lrnorm_trained_model = get_lrnorm_optimizer_and_trained_model()
assertEqual(
verify_models_equivalent_after_training(
base_trained_model,
lrnorm_trained_model,
init_model,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
),
"",
)
def test_local_lr_normalization_single_user(self) -> None:
self._test_local_lr_normalization(num_users=1)
def test_local_lr_normalization_multiple_users(self) -> None:
self._test_local_lr_normalization(num_users=4)
def test_async_training_metrics_reporting(self) -> None:
"""Verify that in async training, metrics are only reported
        when the training_end event fires, not when the training_start event fires.
We do this by:
a) Creating data where user1=8 examples, user2=1 example
        b) Creating the following training event order:
t1: user1 starts training
t2: user2 starts training
t3: user2 finishes training (should cause metric reporting with #examples=1)
t11: user1 finishes training (should cause metric reporting with #examples=8)
We verify the order in which metrics were reported.
metrics_reporter.num_examples_list should be = [1, 8]
        c) Creating the following training event order:
t1: user1 starts training
t2: user2 starts training
t3: user1 finishes training (should cause metric reporting with #examples=8)
t10: user2 finishes training (should cause metric reporting with #examples=1)
We verify the order in which metrics were reported.
metrics_reporter.num_examples_list should be = [8, 1]
"""
# first entry in tuple: time gap between training start and previous training start
# second entry in tuple: training duration
num_examples = 9
num_fl_users = 2
max_examples_per_user = 8
fl_batch_size = 8
num_epochs = 2
global_model = DummyAlphabetFLModel()
fl_data_provider = get_fl_data_provider(
num_examples,
num_fl_users,
max_examples_per_user,
fl_batch_size,
global_model,
)
def _verify_training(
user_1_start_time_delta: int,
user1_training_duration: int,
user2_start_time_delta: int,
user2_training_duration: int,
expected_num_examples: List[int],
):
"""Given start time and duration for user1 and user2, verify that that
num_examples in training has order expected_num_examples
"""
user1_training_events = EventTimingInfo(
prev_event_start_to_current_start=user_1_start_time_delta,
duration=user1_training_duration,
)
user2_training_events = EventTimingInfo(
prev_event_start_to_current_start=user2_start_time_delta,
duration=user2_training_duration,
)
async_trainer = create_async_trainer(
model=global_model,
local_lr=1.0,
epochs=num_epochs,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=1.0),
event_generator_config=AsyncTrainingEventGeneratorFromListConfig(
training_events=[user1_training_events, user2_training_events]
),
)
metrics_reporter = TestMetricsReporter()
fl_model, _ = async_trainer.train(
data_provider=fl_data_provider,
metrics_reporter=metrics_reporter,
num_total_users=fl_data_provider.num_train_users(),
distributed_world_size=1,
)
assertEqual(
metrics_reporter.num_examples_list, expected_num_examples * num_epochs
)
# user2 completes training before user1
_verify_training(
user_1_start_time_delta=1,
user1_training_duration=3,
user2_start_time_delta=1,
user2_training_duration=1,
expected_num_examples=[1, 8],
)
# user1 completes training before user2
_verify_training(
user_1_start_time_delta=1,
user1_training_duration=1,
user2_start_time_delta=10,
user2_training_duration=2,
expected_num_examples=[8, 1],
)
def _run_sequential_training_two_users(
self,
initial_model: IFLModel,
local_lr: int,
first_user_batches: List[Any],
second_user_batches: List[Any],
) -> IFLModel:
"""Sequential training.
user1 = user with data: first_user_batches
user2 = user with data: second_user_batches
order of events:
user1 starts training
user1 finishes training. Global model update
user2 starts training
user2 finishes training. Global model update
"""
global_model = FLModelParamUtils.clone(initial_model)
# sequential training. user1 finishes first. user2 takes user1 trained model, and trains
for batches in [first_user_batches, second_user_batches]:
updated_global_model = async_train_one_user(
global_model_at_training_start=global_model,
global_model_at_training_end=global_model,
batches=batches,
local_lr=local_lr,
)
global_model = FLModelParamUtils.clone(updated_global_model)
return global_model
def _run_parallel_training_two_users(
self,
initial_model: IFLModel,
local_lr: int,
first_user_batches: List[Any],
second_user_batches: List[Any],
) -> IFLModel:
"""Parallel training.
user1 = user with data: first_user_batches
user2 = user with data: second_user_batches
order of events:
user1 starts training
user2 start training
user1 finishes training. Global model update
user2 finishes training. Global model update
NB: this function and _run_sequential_training_two_users share a lot of code, but
they are much easier to read as separate functions. Trading off code duplication
for readability.
"""
global_model = FLModelParamUtils.clone(initial_model)
for batches in [first_user_batches, second_user_batches]:
# both users start training with the same initial model
# loop iteration 1 will handle first_user_batches, which will change
# global_model. so global_model in loop iteration 2 will already include
# changes from loop iteration 1
updated_global_model = async_train_one_user(
global_model_at_training_start=initial_model,
global_model_at_training_end=global_model,
batches=batches,
local_lr=local_lr,
)
global_model = FLModelParamUtils.clone(updated_global_model)
return global_model
def test_num_examples_computation(self) -> None:
r"""
Test that num_examples for each user affects training duration. Test this for different batch sizes
Two users: U1 and U2. U2 always has 2 examples. U1 may have 2, 4, or 6 examples
Training duration = number of examples
Timing:
U1 has 2 examples. Sequential training
t=0 = U1 starts training.
t=2 = U1 ends training
t=3 = U2 starts training
t=5 = U2 ends training
U1 has 4 examples. U1 and U2 train in parallel. U1 ends first
t=0 = U1 starts training.
t=3 = U2 starts training
t=4 = U1 ends training
t=5 = U2 ends training
U1 has 6 examples. U1 and U2 train in parallel. U2 ends first
t=0 = U1 starts training.
t=3 = U2 starts training
t=5 = U2 ends training
t=6 = U1 ends training
Verify by comparing with non-FL training that the above timing is correct.
Do this for batch_size=1 and batch_size=2
"""
num_epochs = 1
torch.manual_seed(1)
initial_model = DummyAlphabetFLModel()
local_lr = 1.0
global_lr = 1.0
num_examples_user2 = 2
batch_size = 2
for num_examples_user1 in [2, 4, 6]:
fl_model = FLModelParamUtils.clone(initial_model)
fl_data_provider, nonfl_data_loader = get_data(
num_examples=num_examples_user1 + num_examples_user2,
num_fl_users=2,
examples_per_user=num_examples_user1,
fl_batch_size=batch_size,
nonfl_batch_size=batch_size,
model=initial_model,
)
fl_trained_model = run_fl_training_with_event_generator(
fl_model=fl_model,
fl_data_provider=fl_data_provider,
epochs=num_epochs,
local_lr=local_lr,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=global_lr),
training_event_generator_config=create_event_generator_config(
# pyre-fixme[6]: Expected `int` for 1st param but got `float`.
training_rate=0.33,
training_duration_mean=1,
training_duration_sd=0,
),
)
# first num_examples_user1/batch_size batches are for user1. Rest for user2
all_batches = list(nonfl_data_loader)
num_user1_batches = num_examples_user1 // batch_size
user1_batches = all_batches[:num_user1_batches]
user2_batches = all_batches[num_user1_batches:]
if num_examples_user1 == 2:
# U1 training takes 2 units of time
# sequence:
# t=0 U1 starts training
# t=2 U1 finishes training
# t=3 U2 starts training
# t=5 U2 finishes training
simulated_global_model = self._run_sequential_training_two_users(
first_user_batches=user1_batches,
second_user_batches=user2_batches,
initial_model=initial_model,
# pyre-fixme[6]: Expected `int` for 4th param but got `float`.
local_lr=local_lr,
)
elif num_examples_user1 == 4:
# U1 training takes 4 units of time
# sequence:
# t=0 U1 starts training
# t=3 U2 starts training
# t=4 U1 finishes training
# t=5 U2 finishes training
simulated_global_model = self._run_parallel_training_two_users(
first_user_batches=user1_batches,
second_user_batches=user2_batches,
initial_model=initial_model,
# pyre-fixme[6]: Expected `int` for 4th param but got `float`.
local_lr=local_lr,
)
elif num_examples_user1 == 6:
# U1 training takes 6 units of time
# sequence:
# t=0 U1 starts training
# t=3 U2 starts training
# t=5 U2 finishes training
# t=6 U1 finishes training
simulated_global_model = self._run_parallel_training_two_users(
first_user_batches=user2_batches,
second_user_batches=user1_batches,
initial_model=initial_model,
# pyre-fixme[6]: Expected `int` for 4th param but got `float`.
local_lr=local_lr,
)
error_msg = verify_models_equivalent_after_training(
fl_trained_model,
# pyre-fixme[61]: `simulated_global_model` is undefined, or not
# always defined.
simulated_global_model,
initial_model,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
)
assertEqual(error_msg, "")
def test_async_training_with_timeout(self) -> None:
"""
Test async training with timeout on.
        Under the condition mean_per_example=1, std_per_example=0,
        the total number of examples trained should be at most timeout_limit * num_users
The test does the following:
1. Create a data provider with 10 users and each user with 10 examples
2. Set batch size = 1, hence the metrics reporter losses = num_trained_examples
3. Check if num_trained_examples <= timeout_limit * num_users
"""
local_lr = 1.0
global_lr = 1.0
epochs = 1
init_model_local = DummyAlphabetFLModel()
metrics_reporter = TestMetricsReporter()
num_users = 10
num_examples = 100
timeout_limit = np.random.randint(1, 10)
async_trainer = create_async_trainer(
model=init_model_local,
local_lr=local_lr,
epochs=epochs,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=global_lr),
event_generator_config=create_event_generator_config(
# pyre-fixme[6]: Expected `int` for 1st param but got `float`.
training_rate=1.0,
training_duration_mean=0.0,
training_duration_sd=0.0,
),
timeout_simulator_config=GaussianTimeOutSimulatorConfig(
timeout_wall_per_round=timeout_limit,
fl_stopping_time=1e4,
duration_distribution_generator=PerExampleGaussianDurationDistributionConfig(
training_duration_mean=1.0, training_duration_sd=0.0
),
),
)
data_provider = get_fl_data_provider(
num_examples=num_examples,
num_fl_users=num_users,
examples_per_user=10,
batch_size=1,
model=init_model_local,
)
final_model, _ = async_trainer.train(
data_provider=data_provider,
metrics_reporter=metrics_reporter,
num_total_users=num_users,
distributed_world_size=1,
)
total_examples_trained = len(metrics_reporter.losses)
total_time = timeout_limit * num_users
assertTrue(total_examples_trained <= total_time)
def test_max_staleness_cutoff(self) -> None:
"""
Test for max staleness cut off
        There are two scenarios:
        1. Users train in parallel with training_duration_mean = 1 and max_staleness set to
        a small number, so not all users' updates are accepted into training
        2. Users train sequentially, so there is no staleness and all users can participate
"""
global_model = DummyAlphabetFLModel()
metrics_reporter = TestMetricsReporter()
num_users = 10
num_examples = num_users
training_rate = num_users
max_staleness = np.random.randint(1, 5)
# training in parallel so not all clients can participate
async_trainer = create_async_trainer(
model=global_model,
local_lr=1.0,
epochs=1,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=1.0),
event_generator_config=create_event_generator_config(
training_rate=training_rate,
training_duration_mean=1.0,
training_duration_sd=0,
),
max_staleness=max_staleness,
)
data_provider = get_fl_data_provider(
num_examples=num_examples,
num_fl_users=num_users,
examples_per_user=1,
batch_size=1,
model=global_model,
)
final_model, _ = async_trainer.train(
data_provider=data_provider,
metrics_reporter=metrics_reporter,
num_total_users=num_users,
distributed_world_size=1,
)
assertLess(async_trainer.global_round, num_users)
# train sequentially then all clients can participate
async_trainer = create_async_trainer(
model=global_model,
local_lr=1.0,
epochs=1,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=1.0),
event_generator_config=create_event_generator_config(
training_rate=training_rate,
training_duration_mean=0,
training_duration_sd=0,
),
max_staleness=0,
)
data_provider = get_fl_data_provider(
num_examples=num_examples,
num_fl_users=num_users,
examples_per_user=1,
batch_size=1,
model=global_model,
)
final_model, _ = async_trainer.train(
data_provider=data_provider,
metrics_reporter=metrics_reporter,
num_total_users=num_users,
distributed_world_size=1,
)
assertTrue(async_trainer.global_round - 1 == num_users)
def test_number_of_steps(self) -> None:
"""This test checks that async training takes the same number of optimizer.step()
as #epochs * #users * #batches_per_user
"""
num_epochs = 5
torch.manual_seed(1)
batch_size = 10
for num_fl_users in [1, 2]:
for batches_per_user in [1, 2]:
fl_model = ConstantGradientFLModel()
num_total_examples = num_fl_users * batch_size * batches_per_user
examples_per_user = batch_size * batches_per_user
fl_data_provider = get_fl_data_provider(
num_examples=num_total_examples,
num_fl_users=num_fl_users,
examples_per_user=examples_per_user,
batch_size=batch_size,
model=fl_model,
)
run_fl_training(
fl_model=fl_model,
fl_data_provider=fl_data_provider,
epochs=num_epochs,
local_lr=1.0,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=1.0),
training_rate=1,
training_duration_mean=0,
training_duration_sd=0,
)
# Async FL training is sequential since mean_training_time is 0
num_optimizer_steps = num_epochs * num_fl_users * batches_per_user
# ConstantGradientFLModel has a property that its bias term = #of times optimizer.step() is called
assertTrue(
np.isclose(
# pyre-fixme[29]: `Union[BoundMethod[typing.Callable(torch.Te...
fl_model.fl_get_module().bias.detach().item(),
num_optimizer_steps,
),
# pyre-fixme[29]: `Union[BoundMethod[typing.Callable(torch.Tensor...
f"Expected: {num_optimizer_steps}, Found: {fl_model.fl_get_module().bias.detach().item()}",
)
def test_best_eval_model_is_kept(self) -> None:
"""This test checks that AsyncTrainer retains the model with the best eval performance
To check for this, we use a special MetricsReporter that tracks which model produced
the best eval results
"""
num_epochs = 5
torch.manual_seed(1)
np.random.seed(1)
# ConstantGradientFLModel has a property that its bias term = #of times optimizer.step() is called
batch_size = 10
num_fl_users = 10
batches_per_user = 1
fl_model = ConstantGradientFLModel()
num_total_examples = num_fl_users * batch_size * batches_per_user
examples_per_user = batch_size * batches_per_user
for buffer_size in [1, 2, 3]:
fl_data_provider = get_fl_data_provider(
num_examples=num_total_examples,
num_fl_users=num_fl_users,
examples_per_user=examples_per_user,
batch_size=batch_size,
model=fl_model,
)
metrics_reporter = RandomEvalMetricsReporter()
# run_fl_training returns test results. However,
# RandomEvalMetricsReporter() fakes it so test results are always
# the best_eval_results
best_model, best_eval_results = run_fl_training(
fl_model=fl_model,
fl_data_provider=fl_data_provider,
epochs=num_epochs,
local_lr=1.0,
aggregator_config=FedAvgWithLRFedBuffAggregatorConfig(
lr=1.0, buffer_size=buffer_size
),
metrics_reporter=metrics_reporter,
do_eval=True,
report_train_metrics_after_aggregation=True,
eval_epoch_frequency=0.001, # report every global model update
)
assertEqual(best_eval_results, metrics_reporter.best_eval_result)
# TODO: also check that best_model matches metrics_reporter.best_eval_model
# after fixing code
@pytest.mark.parametrize(
"num_users,training_rate,num_epochs", [(100, 10, 2), (50, 10, 2)]
)
def test_constant_concurrency(
self, num_users: int, training_rate, num_epochs: int
) -> None:
"""
Test for constant concurrency from one epoch to another
We expect training_rate #users to be training simultaneously
In this test, we expect these invariants to hold
        1. Concurrency ~ training rate from epoch 1 to epoch n once training reaches steady state
        2. Mean steady-state concurrency across all epochs ~ training rate
        3. The async trainer stops after each epoch to run the eval loop, therefore
        we evaluate every num_users rounds
"""
torch.manual_seed(2)
global_model = DummyAlphabetFLModel()
num_examples = num_users
event_generator_config = AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=ConstantAsyncTrainingStartTimeDistrConfig(
training_rate=training_rate
),
duration_distribution_generator=PerUserUniformDurationDistributionConfig(
training_duration_mean=1.0,
training_duration_min=0.01,
),
)
metrics_reporter = ConcurrencyMetricsReporter([Channel.STDOUT])
# training in parallel so not all clients can participate
async_trainer = create_async_trainer(
model=global_model,
local_lr=1.0,
epochs=num_epochs,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=1.0),
event_generator_config=event_generator_config,
do_eval=True,
train_metrics_reported_per_epoch=num_users,
)
data_provider = get_fl_data_provider(
num_examples=num_examples,
num_fl_users=num_users,
examples_per_user=1,
batch_size=1,
model=global_model,
)
final_model, _ = async_trainer.train(
data_provider=data_provider,
metrics_reporter=metrics_reporter,
num_total_users=num_users,
)
        # invariant 1: check that concurrency eventually reaches a steady state
        # that is close to the training rate
steady_state_seqnum = int(0.9 * len(metrics_reporter.concurrency_metrics))
for c in metrics_reporter.concurrency_metrics[steady_state_seqnum:]:
assertAlmostEqual(c, training_rate, delta=1.0)
# invariant 2
assertAlmostEqual(
np.mean(metrics_reporter.concurrency_metrics[steady_state_seqnum:]),
training_rate,
delta=1.0,
)
# invariant 3
assertEqual(len(metrics_reporter.eval_rounds), num_epochs)
assertListEqual(
metrics_reporter.eval_rounds,
[(i * num_users) for i in range(1, num_epochs + 1)],
)
| canife-main | FLSim/flsim/trainers/tests/test_async_trainer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/trainers/tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
from typing import Tuple
import numpy as np
import torch
from flsim.common.pytest_helper import assertEmpty, assertEqual
from flsim.data.data_provider import FLDataProviderFromList, IFLDataProvider
from flsim.data.data_sharder import RoundRobinSharder
from flsim.data.dataset_data_loader import FLDatasetDataLoaderWithBatch
from flsim.interfaces.model import IFLModel
from flsim.optimizers.async_aggregators import (
FedAvgWithLRAsyncAggregatorConfig,
FedAvgWithLRFedBuffAggregatorConfig,
)
from flsim.utils.async_trainer.async_example_weights import (
AsyncExampleWeightConfig,
EqualExampleWeightConfig,
ExampleWeight,
LinearExampleWeightConfig,
Log10ExampleWeightConfig,
SqrtExampleWeightConfig,
)
from flsim.utils.async_trainer.async_staleness_weights import (
ConstantStalenessWeightConfig,
PolynomialStalenessWeightConfig,
ThresholdStalenessWeightConfig,
)
from flsim.utils.async_trainer.training_event_generator import (
AsyncTrainingEventGeneratorFromListConfig,
EventTimingInfo,
)
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.sample_model import DummyAlphabetFLModel
from flsim.utils.test_utils import verify_models_equivalent_after_training
from flsim.utils.tests.helpers.test_async_trainer_utils import (
async_train_one_user,
get_equal_split_data,
get_fl_data_provider,
run_fl_nonfl_training,
run_fl_training,
run_fl_training_with_event_generator,
)
from flsim.utils.tests.helpers.test_data_utils import DummyAlphabetDataset
from hydra.utils import instantiate
class TestAsyncTrainerWeights:
def _get_fl_local_lr_and_example_weights_config(
self,
example_weight_config: AsyncExampleWeightConfig,
num_examples_per_user: int,
normalize_wts: bool,
) -> Tuple[float, AsyncExampleWeightConfig]:
"""Compute the fl_local_learning_rate and example_weight_config
that will cause FL and Non-FL training to produce the same results.
Normalize weights: when this option is true, example weight is divided
by the average example weight
"""
return_config = copy.deepcopy(example_weight_config)
if normalize_wts:
# effectively equal to not having any example weight, since all users
# in the test have the same number of examples
return_config.avg_num_examples = num_examples_per_user
return (1.0, return_config)
else:
# create un-normalized example weight object
return_config.avg_num_examples = 1
example_weight_obj: ExampleWeight = instantiate(return_config)
example_weight = example_weight_obj.weight(num_examples_per_user)
fl_local_lr = 1.0 / example_weight
return (fl_local_lr, return_config)
def compare_num_examples_wt(
self,
base_learning_rate: float,
example_weight_config: AsyncExampleWeightConfig,
rel_epsilon: float,
normalize_wts: bool,
) -> None:
"""Run training for two tasks:
data1: 2 training examples, same for data2 and data3
example_weight_type: defines how number of examples affect weight
        Example_weight_str is used to choose a function ExWt(x)
When example_weight_str = 'linear', ExWt(x) = x
When example_weight_str = 'sqrt', ExWt(x) = sqrt(x)
When example_weight_str = 'log10', ExWt(x) = log10(x)
Task1: Non-FL task, data = {data1, data2, data3}, LR=base_lr
Task2: FL async task, training_duration = 0,
LR=base_lr/ExWt(2), example_weight_type=LINEAR
data = {user1: data1, user2: data2, user3: data3}
        Users are trained sequentially,
        so we train in order user1-->user2-->user3
user_epochs_per_round=1
Train Task1, Task2. Show that you end up with the same model
"""
num_training_examples_per_user = 2
num_users = 3
num_examples = 6
torch.manual_seed(1)
global_model = DummyAlphabetFLModel()
# will be used later to verify training indeed took place
global_model_init_copy = FLModelParamUtils.clone(global_model)
nonfl_model = FLModelParamUtils.clone(global_model_init_copy)
(
fl_local_learning_rate,
example_weight_config,
) = self._get_fl_local_lr_and_example_weights_config(
example_weight_config=example_weight_config,
num_examples_per_user=num_training_examples_per_user,
normalize_wts=normalize_wts,
)
fl_aggregator_config = FedAvgWithLRAsyncAggregatorConfig(lr=base_learning_rate)
nonfl_optimizer = torch.optim.SGD(
nonfl_model.fl_get_module().parameters(), lr=base_learning_rate
)
num_epochs = 1
fl_data_provider, nonfl_data_loader = get_equal_split_data(
num_examples=num_examples,
num_fl_users=num_users,
fl_batch_size=num_training_examples_per_user,
model=global_model,
one_batch_per_user_only=False,
)
fl_trained_model, nonfl_trained_model = run_fl_nonfl_training(
fl_model=global_model,
nonfl_model=nonfl_model,
nonfl_optimizer=nonfl_optimizer,
fl_data_provider=fl_data_provider,
nonfl_data_loader=nonfl_data_loader,
epochs=num_epochs,
fl_local_lr=fl_local_learning_rate,
fl_aggregator_config=fl_aggregator_config,
training_rate=1,
training_duration_mean=0,
training_duration_sd=0,
example_weight_config=example_weight_config,
)
assertEqual(
verify_models_equivalent_after_training(
fl_trained_model,
nonfl_trained_model,
global_model_init_copy,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
),
"",
)
def test_linear_example_wts_sgd(self) -> None:
for example_weight_config in (
EqualExampleWeightConfig(),
LinearExampleWeightConfig(),
SqrtExampleWeightConfig(),
Log10ExampleWeightConfig(),
):
for normalize_wts in [True, False]:
# choose a random learning rate between 0 and 1
learning_rate = np.random.random_sample()
# adding a printf to debug flaky test failure in Sandcastle
print(f"Learning rate: {learning_rate}")
self.compare_num_examples_wt(
base_learning_rate=learning_rate,
rel_epsilon=1e-6,
example_weight_config=example_weight_config,
normalize_wts=normalize_wts,
)
# TODO: add test_linear_example_wts_adam once we know how to scale
# momentum and adaptive learning rate appropriately with weights
def _get_fl_data_round_robin_sharding(
self, num_examples: int, num_fl_users: int, fl_batch_size: int, model: IFLModel
) -> IFLDataProvider:
dummy_dataset = DummyAlphabetDataset(num_examples)
fl_data_sharder = RoundRobinSharder(num_shards=num_fl_users)
fl_data_loader = FLDatasetDataLoaderWithBatch(
dummy_dataset,
dummy_dataset,
dummy_dataset,
fl_data_sharder,
fl_batch_size,
fl_batch_size,
fl_batch_size,
)
fl_data_provider = FLDataProviderFromList(
fl_data_loader.fl_train_set(),
fl_data_loader.fl_eval_set(),
fl_data_loader.fl_test_set(),
model,
)
assert fl_data_loader.num_total_users == num_fl_users, "Error in data sharding"
return fl_data_provider
def test_default_example_weights(self) -> None:
"""Create an FL Async task with default values for example weight (equal example weight)
Note: In async, weight of a user update = example_weight * staleness_weight
Equal example weight => example_weight = 1.0 irrespective of #examples
Verify that weight doesn't depend on number of examples. Train 2 FL tasks:
Task1: {user1:data1}, {user2:data2}
Task2: {user1:data1,data1}, {user2:data2,data2} (each user gets duplicate copy of data)
Both tasks are trained in "sequential" order
Task1 and Task2 produce the same model
"""
num_epochs = 1
num_users = 2
torch.manual_seed(1)
# create dummy FL model on alphabet
trained_models = []
initial_model = DummyAlphabetFLModel()
local_lr = np.random.random_sample() * 10
global_lr = np.random.random_sample() * 10
dataset_size = 26 # number of examples in DummyAlphabetDataset
for replication_factor in [1, 2]:
num_examples = replication_factor * dataset_size # either 26 or 52
fl_model = FLModelParamUtils.clone(initial_model)
fl_data_provider = self._get_fl_data_round_robin_sharding(
num_examples=num_examples,
num_fl_users=num_users,
fl_batch_size=dataset_size,
model=fl_model,
)
fl_trained_model, _ = run_fl_training(
fl_model=fl_model,
fl_data_provider=fl_data_provider,
epochs=num_epochs,
local_lr=local_lr,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=global_lr),
training_rate=1,
training_duration_mean=0,
training_duration_sd=0,
)
trained_models.append(fl_trained_model)
assertEqual(len(trained_models), 2, "Expected to train two models")
assertEqual(
verify_models_equivalent_after_training(
trained_models[0],
trained_models[1],
initial_model,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
),
"",
)
def test_default_staleness_weights(self) -> None:
r"""Create an FL Async task with default values for staleness weight (no staleness weight)
Verify that weight doesn't depend on staleness. Train on two users, where training events
are as follows. Assume initial model=M
T0: UserA starts training. Gets initial model M.
T1: UserB starts training. Gets initial model M.
T2: UserA finishes training. Local training: M->M_A. Global model updated to M_A
T3: User B finishes training. Local training: M->M_B.
Global model update: M_A->M_Final
        M_Final = M_A + (M_B - M)*staleness_wt. **Since staleness_wt=1**
M_Final = M_A + M_B - M
"""
num_epochs = 1
torch.manual_seed(1)
# create dummy FL model on alphabet
initial_model = DummyAlphabetFLModel()
local_lr = np.random.random_sample() * 10
global_lr = 1.0
fl_model = FLModelParamUtils.clone(initial_model)
num_examples = 26
num_fl_users = 2
fl_data_provider, nonfl_data_loader = get_equal_split_data(
num_examples=num_examples,
num_fl_users=num_fl_users,
fl_batch_size=num_examples // num_fl_users, # each user has exactly 1 batch
model=initial_model,
one_batch_per_user_only=False,
)
fl_trained_model, _ = run_fl_training(
fl_model=fl_model,
fl_data_provider=fl_data_provider,
epochs=num_epochs,
local_lr=local_lr,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=global_lr),
training_rate=1,
training_duration_mean=100,
training_duration_sd=0,
)
# Simulate FL training: train on user1, update global model. Then train
# on user 2, update global model.
data_loader_iter = iter(nonfl_data_loader)
user1_batch = next(data_loader_iter)
user2_batch = next(data_loader_iter)
simulated_global_model = FLModelParamUtils.clone(initial_model)
# train user1
simulated_global_model = async_train_one_user(
global_model_at_training_start=simulated_global_model,
global_model_at_training_end=simulated_global_model,
batches=[user1_batch],
local_lr=local_lr,
)
# train user2
# key difference from training user1: global_model_at_training_start
# is different from global_model_at_training_end
simulated_global_model = async_train_one_user(
global_model_at_training_start=initial_model,
global_model_at_training_end=simulated_global_model,
batches=[user2_batch],
local_lr=local_lr,
)
error_msg = verify_models_equivalent_after_training(
fl_trained_model,
simulated_global_model,
initial_model,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
)
assertEqual(error_msg, "")
def test_threshold_staleness_weights(self) -> None:
r"""
Run training for two tasks:
Task1: FL async task, training_duration = 100, training_rate=1
data = {user1: data1, user2: data2, user3: data3}
staleness_weight = {threshold. if staleness>0, weight=0}
timeline is:
t=0, user 1 starts training
        t=1, user 2 starts training
t=3, user 3 starts training
t=100, user 1 finishes training. staleness=0. staleness_wt=1.0
t=101, user 2 finishes training. staleness=1. staleness_wt=0
        t=102, user 3 finishes training. staleness=2. staleness_wt=0
Because users 2 and 3 have non-zero staleness, their model updates
get no weight at all, so it's as if they didn't even exist
Task2: {user1: data1}
Train Task1, Task2. Show that we end up with the same model
"""
num_epochs = 1
num_users = 3
num_examples_per_user = 8
# training config when 3 users, with staleness
staleness_training_rate = 1
staleness_training_duration_mean = 100
staleness_cutoff = 0
staleness_wt_after_cutoff = 0.0
# training config that forces sequential training
sequential_training_rate = 1
sequential_training_duration_mean = 0
local_lr = 1.0
aggregator_config = FedAvgWithLRAsyncAggregatorConfig(lr=1.0)
# set seed to 1 before model init
torch.manual_seed(1)
initial_model = DummyAlphabetFLModel()
fl_data_provider_with_staleness, _ = get_equal_split_data(
num_examples=num_users * num_examples_per_user,
num_fl_users=num_users,
fl_batch_size=num_examples_per_user,
model=initial_model,
one_batch_per_user_only=False,
)
fl_trained_model_with_staleness, _ = run_fl_training(
fl_model=FLModelParamUtils.clone(initial_model),
fl_data_provider=fl_data_provider_with_staleness,
epochs=num_epochs,
local_lr=local_lr,
aggregator_config=aggregator_config,
training_rate=staleness_training_rate,
training_duration_mean=staleness_training_duration_mean,
training_duration_sd=0,
staleness_weight_config=ThresholdStalenessWeightConfig(
avg_staleness=0,
cutoff=staleness_cutoff,
value_after_cutoff=staleness_wt_after_cutoff,
),
)
fl_data_provider_sequential, _ = get_equal_split_data(
num_examples=num_examples_per_user,
num_fl_users=1,
fl_batch_size=num_examples_per_user,
model=initial_model,
one_batch_per_user_only=False,
)
fl_trained_model_sequential, _ = run_fl_training(
fl_model=FLModelParamUtils.clone(initial_model),
fl_data_provider=fl_data_provider_sequential,
epochs=num_epochs,
local_lr=local_lr,
aggregator_config=aggregator_config,
training_rate=sequential_training_rate,
training_duration_mean=sequential_training_duration_mean,
training_duration_sd=0,
staleness_weight_config=ConstantStalenessWeightConfig(),
)
assertEqual(
verify_models_equivalent_after_training(
fl_trained_model_with_staleness,
fl_trained_model_sequential,
initial_model,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
),
"",
)
def test_polynomial_staleness_weights(self) -> None:
r"""
Integration test for polynomial staleness weight
Train task where polynomial staleness weight cancels out example_weight,
resulting in total_wt=1.0
        training_data: {
            data1 = 26 examples,
            user1: {data1, data1},
            user2: {data1}
        }
Task1: FL async task, training_duration = 100, training_rate=1
staleness_weight = {
polynomial, exponent=1.0 => wt = 1/(staleness+1)
staleness=0, wt=1.
staleness=1, wt=1/2,
}
example_weight = linear, avg_num_examples=26
timeline is:
t=1, user 1 starts training
t=2, user 2 starts training
t=3, user 2 finishes training
staleness=0. staleness_wt=1.0, example_wt=26/26, total_wt=1
t=101, user 1 finishes training.
staleness=1. staleness_wt=0.5, example_wt=52/26, total_wt=1
Task2: FL async task, training_duration = 100, training_rate=1
staleness_weight = constant
example_weight = constant
all users get total_weight = 1.0
Train Task1, Task2. Show that we end up with the same model
"""
# common configs
num_epochs = 1
# set seed to 1 before model init
torch.manual_seed(1)
# train fl fask
local_lr = 1.0
aggregator_config = FedAvgWithLRAsyncAggregatorConfig(lr=1.0)
num_users = 2
        # user1 has 2 * dataset_size examples, user2 has dataset_size examples
dataset_size = 26
max_num_examples_per_user = dataset_size * num_users
total_num_examples = dataset_size * 3
# create config such that training events happen in the following order
# t=1, user 1 starts training
# t=2, user 2 starts training
# t=3, user 2 finishes training
# t=101, user 1 finishes training.
user_1_start_time_delta = 1
user_2_start_time_delta = 1
user_1_training_duration = 100
user_2_training_duration = 1
user1_training_events = EventTimingInfo(
prev_event_start_to_current_start=user_1_start_time_delta,
duration=user_1_training_duration,
)
user2_training_events = EventTimingInfo(
prev_event_start_to_current_start=user_2_start_time_delta,
duration=user_2_training_duration,
)
event_generator_config = AsyncTrainingEventGeneratorFromListConfig(
training_events=[user1_training_events, user2_training_events]
)
# set seed to 1 before model init
torch.manual_seed(1)
initial_model = DummyAlphabetFLModel()
# split data such that
# user1: {a,b,c...z,a,b,c...z}, user2:{a,b,c...z}
data_provider = get_fl_data_provider(
num_examples=total_num_examples,
num_fl_users=2,
examples_per_user=max_num_examples_per_user,
batch_size=max_num_examples_per_user,
model=initial_model,
)
poly_staleness_wt_model = run_fl_training_with_event_generator(
fl_model=FLModelParamUtils.clone(initial_model),
fl_data_provider=data_provider,
epochs=num_epochs,
local_lr=local_lr,
aggregator_config=aggregator_config,
training_event_generator_config=event_generator_config,
staleness_weight_config=PolynomialStalenessWeightConfig(
avg_staleness=0, exponent=1.0
),
example_weight_config=LinearExampleWeightConfig(
avg_num_examples=dataset_size
),
)
equal_staleness_wt_model = run_fl_training_with_event_generator(
fl_model=FLModelParamUtils.clone(initial_model),
fl_data_provider=data_provider,
epochs=num_epochs,
local_lr=local_lr,
aggregator_config=aggregator_config,
training_event_generator_config=event_generator_config,
staleness_weight_config=ConstantStalenessWeightConfig(avg_staleness=0),
example_weight_config=LinearExampleWeightConfig(
avg_num_examples=dataset_size
),
)
assertEqual(
verify_models_equivalent_after_training(
poly_staleness_wt_model,
equal_staleness_wt_model,
initial_model,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
),
"",
)
def test_example_wts_and_staleness_wts_lr_scale(self) -> None:
"""
Test for same number of examples and a fixed global learning rate.
        Async with no example weight (equal weight) should match linear example weight when
        no_ex_local_lr = ex_local_lr = 1, no_ex_global_lr = 1, and
        ex_global_lr = no_ex_global_lr / examples_per_user
"""
total_num_examples = 100
num_users = 10
examples_per_user = total_num_examples // num_users
batch_size = 3
init_model = DummyAlphabetFLModel()
data_provider = get_fl_data_provider(
num_examples=total_num_examples,
num_fl_users=num_users,
examples_per_user=examples_per_user,
batch_size=batch_size,
model=init_model,
)
local_lr = 1.0
epochs = 5
training_mean = 1
training_std = 1
buffer_size = 5
example_wt_off_model = FLModelParamUtils.clone(init_model)
example_wt_off_global_lr = 1.0
example_wt_off_model, _ = run_fl_training(
fl_model=example_wt_off_model,
fl_data_provider=data_provider,
epochs=epochs,
local_lr=local_lr,
aggregator_config=FedAvgWithLRFedBuffAggregatorConfig(
lr=example_wt_off_global_lr, buffer_size=buffer_size
),
training_rate=1,
training_duration_mean=training_mean,
training_duration_sd=training_std,
staleness_weight_config=PolynomialStalenessWeightConfig(
avg_staleness=0, exponent=0.5
),
example_weight_config=EqualExampleWeightConfig(avg_num_examples=1),
)
example_wt_on_model = FLModelParamUtils.clone(init_model)
example_wt_on_global_lr = example_wt_off_global_lr / examples_per_user
example_wt_on_model, _ = run_fl_training(
fl_model=example_wt_on_model,
fl_data_provider=data_provider,
epochs=epochs,
local_lr=local_lr,
aggregator_config=FedAvgWithLRFedBuffAggregatorConfig(
lr=example_wt_on_global_lr, buffer_size=buffer_size
),
training_rate=1,
training_duration_mean=training_mean,
training_duration_sd=training_std,
staleness_weight_config=PolynomialStalenessWeightConfig(
avg_staleness=0, exponent=0.5
),
example_weight_config=LinearExampleWeightConfig(avg_num_examples=1),
)
error_msg = verify_models_equivalent_after_training(
example_wt_on_model,
example_wt_off_model,
init_model,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
)
assertEmpty(error_msg, msg=error_msg)
| canife-main | FLSim/flsim/trainers/tests/test_async_trainer_weights.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import json
import math
from typing import List
import flsim.configs # noqa
import pkg_resources
import pytest
import torch
from flsim.active_user_selectors.simple_user_selector import (
SequentialActiveUserSelectorConfig,
)
from flsim.clients.base_client import ClientConfig
from flsim.common.pytest_helper import (
assertEmpty,
assertEqual,
assertIsInstance,
assertTrue,
)
from flsim.common.timeout_simulator import GaussianTimeOutSimulatorConfig
from flsim.data.data_provider import FLDataProviderFromList
from flsim.data.data_sharder import SequentialSharder
from flsim.data.dataset_data_loader import FLDatasetDataLoaderWithBatch
from flsim.interfaces.metrics_reporter import TrainingStage
from flsim.optimizers.async_aggregators import FedAdamAsyncAggregatorConfig
from flsim.optimizers.local_optimizers import (
LocalOptimizerFedProxConfig,
LocalOptimizerSGDConfig,
)
from flsim.optimizers.optimizer_scheduler import ArmijoLineSearchSchedulerConfig
from flsim.optimizers.server_optimizers import (
FedAdamOptimizerConfig,
FedAvgWithLROptimizerConfig,
)
from flsim.servers.sync_servers import SyncServerConfig
from flsim.trainers.async_trainer import AsyncTrainer, AsyncTrainerConfig
from flsim.trainers.sync_trainer import SyncTrainer, SyncTrainerConfig
from flsim.utils.config_utils import fl_config_from_json
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.sample_model import DummyAlphabetFLModel
from flsim.utils.test_utils import (
FakeMetricReporter,
MetricsReporterWithMockedChannels,
MockRecord,
SimpleMetricReporter,
verify_models_equivalent_after_training,
)
from flsim.utils.tests.helpers.test_data_utils import DummyAlphabetDataset
from flsim.utils.tests.helpers.test_sync_trainer_utils import create_sync_trainer
from flsim.utils.tests.helpers.test_utils import FLTestUtils
from flsim.utils.timing.training_duration_distribution import (
PerExampleGaussianDurationDistributionConfig,
)
from hydra.experimental import compose, initialize
from hydra.utils import instantiate
from omegaconf import OmegaConf
CONFIG_PATH = "test_resources"
SYNC_TRAINER_JSON = f"{CONFIG_PATH}/sync_trainer.json"
ASYNC_TRAINER_JSON = f"{CONFIG_PATH}/async_trainer.json"
SYNC_TRAINER_WITH_DP_JSON = f"{CONFIG_PATH}/sync_trainer_with_dp.json"
SYNC_TRAINER_WRONG_JSON = f"{CONFIG_PATH}/sync_trainer_wrong_dp_config.json"
SYNC_TRAINER_WITH_SECAGG_JSON = f"{CONFIG_PATH}/sync_trainer_with_secagg.json"
SYNC_TRAINER_YAML = "sync_trainer"
ASYNC_TRAINER_YAML = "async_trainer"
SYNC_TRAINER_WITH_DP_YAML = "sync_trainer_with_dp"
SYNC_TRAINER_WRONG_YAML = "sync_trainer_wrong_dp_config"
SYNC_TRAINER_WITH_SECAGG_YAML = "sync_trainer_with_secagg"
class TestTrainer:
@pytest.mark.parametrize(
"json_file_name,trainer_class ",
[
(SYNC_TRAINER_JSON, SyncTrainer),
(ASYNC_TRAINER_JSON, AsyncTrainer),
(SYNC_TRAINER_WITH_DP_JSON, SyncTrainer),
(SYNC_TRAINER_WITH_SECAGG_JSON, SyncTrainer),
],
)
def test_trainer_creation_from_json_config(
self, json_file_name: str, trainer_class: type
) -> None:
trainer = None
file_path = pkg_resources.resource_filename(__name__, json_file_name)
with open(file_path, "r") as parameters_file:
json_cfg = json.load(parameters_file)
cfg = fl_config_from_json(json_cfg)
trainer = instantiate(
cfg.trainer,
model=DummyAlphabetFLModel(),
cuda_enabled=False,
)
assertIsInstance(trainer, trainer_class)
def test_trainer_sync_server_creation_from_json_config(self) -> None:
file_path = pkg_resources.resource_filename(__name__, SYNC_TRAINER_JSON)
with open(file_path, "r") as parameters_file:
json_cfg = json.load(parameters_file)
cfg = fl_config_from_json(json_cfg)
trainer = instantiate(
cfg.trainer,
model=DummyAlphabetFLModel(),
cuda_enabled=False,
)
assertIsInstance(trainer.server._optimizer, torch.optim.Adam)
@pytest.mark.parametrize(
"yaml_file_name,trainer_class ",
[
(SYNC_TRAINER_YAML, SyncTrainer),
(ASYNC_TRAINER_YAML, AsyncTrainer),
(SYNC_TRAINER_WITH_DP_YAML, SyncTrainer),
(SYNC_TRAINER_WITH_SECAGG_YAML, SyncTrainer),
],
)
def test_trainer_creation_from_yaml_config(
self, yaml_file_name: str, trainer_class: type
) -> None:
trainer = None
with initialize(config_path=CONFIG_PATH):
cfg = compose(config_name=yaml_file_name)
trainer = instantiate(
cfg.trainer,
model=DummyAlphabetFLModel(),
cuda_enabled=False,
)
assertIsInstance(trainer, trainer_class)
def test_async_trainer_with_dp_creation_from_json_config(self) -> None:
trainer = None
file_path = pkg_resources.resource_filename(__name__, ASYNC_TRAINER_JSON)
with open(file_path, "r") as parameters_file:
json_cfg = json.load(parameters_file)
cfg = fl_config_from_json(json_cfg)
trainer = instantiate(
cfg.trainer,
model=DummyAlphabetFLModel(),
cuda_enabled=False,
)
assertIsInstance(trainer, AsyncTrainer)
assertTrue(trainer.aggregator.is_private)
def test_async_trainer_with_dp_creation_from_yaml_config(self) -> None:
trainer = None
with initialize(config_path=CONFIG_PATH):
cfg = compose(config_name=ASYNC_TRAINER_YAML)
trainer = instantiate(
cfg.trainer,
model=DummyAlphabetFLModel(),
cuda_enabled=False,
)
assertIsInstance(trainer, AsyncTrainer)
assertTrue(trainer.aggregator.is_private)
def test_global_model_unchanged_after_metrics_reporting(self) -> None:
"""
reporting metrics after aggregation should NOT update the global model
"""
# mock 26 rows
shard_size = 4
local_batch_size = 4
world_size = 1
dummy_dataset = DummyAlphabetDataset()
fl_data_sharder = SequentialSharder(examples_per_shard=shard_size)
data_loader = FLDatasetDataLoaderWithBatch(
dummy_dataset,
dummy_dataset,
dummy_dataset,
fl_data_sharder,
local_batch_size,
local_batch_size,
local_batch_size,
)
global_model = DummyAlphabetFLModel()
local_optimizer_lr = 0.1
metrics_reporter = FakeMetricReporter()
users_per_round = 2
sync_trainer_no_report = create_sync_trainer(
model=global_model,
local_lr=local_optimizer_lr,
users_per_round=users_per_round,
epochs=3,
user_epochs_per_round=2,
)
sync_trainer_report = create_sync_trainer(
model=FLModelParamUtils.clone(global_model),
local_lr=local_optimizer_lr,
users_per_round=users_per_round,
epochs=3,
user_epochs_per_round=2,
report_train_metrics=True,
report_train_metrics_after_aggregation=True,
)
data_provider = FLDataProviderFromList(
data_loader.fl_train_set(),
data_loader.fl_eval_set(),
data_loader.fl_test_set(),
global_model,
)
# training with reporting the train metrics after aggregation
modules = []
for trainer in [sync_trainer_report, sync_trainer_no_report]:
model, _ = trainer.train(
data_provider,
metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=world_size,
)
modules.append(model.fl_get_module())
# make sure metrics reporting after aggregation does not change global model
assertEqual(FLModelParamUtils.get_mismatched_param(modules), "")
def test_evaluate_global_model_after_aggregation(self):
(
data_provider,
_,
) = DummyAlphabetDataset.create_data_provider_and_loader_train_and_eval_users(
num_train_examples=10,
num_eval_examples=20,
examples_per_user=1,
train_batch_size=1,
eval_batch_size=1,
)
global_model = DummyAlphabetFLModel()
metrics_reporter = MetricsReporterWithMockedChannels()
trainer = create_sync_trainer(
model=FLModelParamUtils.clone(global_model),
local_lr=1.0,
users_per_round=1,
epochs=3,
user_epochs_per_round=1,
report_train_metrics=True,
report_train_metrics_after_aggregation=True,
train_metrics_reported_per_epoch=10,
)
trainer.train(
data_provider,
metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=1,
)
agg_metrics = [
m[0] for m in metrics_reporter.stdout_results if "Aggregation" in m[0]
]
eval_metrics = [m[0] for m in metrics_reporter.stdout_results if "Eval" in m[0]]
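        # Expected counts, derived from the config above: 10 train users with
        # users_per_round=1 give 10 aggregation rounds per epoch, each reported, over 3
        # epochs -> 30 post-aggregation reports; eval runs once per epoch -> 3 eval reports.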
assertEqual(len(agg_metrics), 30)
assertEqual(len(eval_metrics), 3)
def test_client_optimizer_creation_from_config(self) -> None:
"""
Test if trainer can instantiate the correct client optimizer from config
"""
for optimizer_config, trainer_type in zip(
[LocalOptimizerSGDConfig(lr=1.0), LocalOptimizerFedProxConfig(mu=1.0)],
[SyncTrainer, AsyncTrainer],
):
config = (
SyncTrainerConfig(client=ClientConfig(optimizer=optimizer_config))
if trainer_type == SyncTrainer
else AsyncTrainerConfig(client=ClientConfig(optimizer=optimizer_config))
)
trainer = instantiate(
config, model=DummyAlphabetFLModel(), cuda_enabled=False
)
assertTrue(isinstance(trainer, trainer_type))
@pytest.mark.parametrize(
"config,trainer_type ",
[
(
SyncServerConfig(
server_optimizer=FedAvgWithLROptimizerConfig(lr=1.0, momentum=0.0)
),
SyncTrainer,
),
(FedAdamAsyncAggregatorConfig(beta1=0.1), AsyncTrainer),
],
)
def test_server_optimizer_creation_from_config(self, config, trainer_type) -> None:
"""
Test if trainer can instantiate correct aggregator config
"""
config = (
SyncTrainerConfig(server=config)
if trainer_type == SyncTrainer
else AsyncTrainerConfig(aggregator=config)
)
trainer = instantiate(config, model=DummyAlphabetFLModel(), cuda_enabled=False)
assertTrue(isinstance(trainer, trainer_type))
def test_same_training_results_with_post_aggregation_reporting(self) -> None:
"""
create two training instances,
one with report_train_metrics_after_aggregation=True, another with False,
check training results match
"""
torch.manual_seed(1)
# mock 26 rows
shard_size = 4
local_batch_size = 4
world_size = 1
dummy_dataset = DummyAlphabetDataset()
fl_data_sharder = SequentialSharder(examples_per_shard=shard_size)
data_loader = FLDatasetDataLoaderWithBatch(
dummy_dataset,
dummy_dataset,
dummy_dataset,
fl_data_sharder,
local_batch_size,
local_batch_size,
local_batch_size,
)
users_per_round = 2
local_optimizer_lr = 0.1
torch.manual_seed(1)
# first training instance
global_model_1 = DummyAlphabetFLModel()
metrics_reporter_1 = FakeMetricReporter()
sync_trainer_1 = create_sync_trainer(
model=global_model_1,
local_lr=local_optimizer_lr,
users_per_round=users_per_round,
epochs=3,
user_epochs_per_round=2,
)
data_provider = FLDataProviderFromList(
data_loader.fl_train_set(),
data_loader.fl_eval_set(),
data_loader.fl_test_set(),
global_model_1,
)
assertEqual(data_provider.num_train_users(), data_loader.num_total_users)
# training with reporting the train metrics after aggregation
sync_trainer_1.cfg.report_train_metrics = True
sync_trainer_1.cfg.report_train_metrics_after_aggregation = True
global_model_1, best_metric_1 = sync_trainer_1.train(
data_provider,
metrics_reporter_1,
num_total_users=data_provider.num_train_users(),
distributed_world_size=world_size,
)
torch.manual_seed(1)
# second training instance
global_model_2 = DummyAlphabetFLModel()
metrics_reporter_2 = FakeMetricReporter()
sync_trainer_2 = create_sync_trainer(
model=global_model_2,
users_per_round=users_per_round,
local_lr=local_optimizer_lr,
epochs=3,
user_epochs_per_round=2,
)
# training without reporting the train metrics after aggregation
sync_trainer_2.cfg.report_train_metrics = True
sync_trainer_2.cfg.report_train_metrics_after_aggregation = False
global_model_2, best_metric_2 = sync_trainer_2.train(
data_provider,
metrics_reporter_2,
num_total_users=data_provider.num_train_users(),
distributed_world_size=world_size,
)
# check two training instance produce same results
assertEqual(
FLModelParamUtils.get_mismatched_param(
[global_model_1.fl_get_module(), global_model_2.fl_get_module()]
),
"",
)
def test_different_metrics_with_aggregation_client_reporting(self) -> None:
"""
create two training instances,
one with use_train_clients_for_aggregation_metrics=True, another with False,
check that the aggregation eval metrics are different
"""
torch.manual_seed(1)
# mock 26 rows
shard_size = 4
local_batch_size = 4
world_size = 1
dummy_dataset = DummyAlphabetDataset()
fl_data_sharder = SequentialSharder(examples_per_shard=shard_size)
data_loader = FLDatasetDataLoaderWithBatch(
dummy_dataset,
dummy_dataset,
dummy_dataset,
fl_data_sharder,
local_batch_size,
local_batch_size,
local_batch_size,
)
users_per_round = 2
local_optimizer_lr = 0.1
torch.manual_seed(1)
# first training instance
global_model_1 = DummyAlphabetFLModel()
metrics_reporter_1 = SimpleMetricReporter()
sync_trainer_1 = create_sync_trainer(
model=global_model_1,
local_lr=local_optimizer_lr,
users_per_round=users_per_round,
epochs=3,
user_epochs_per_round=2,
)
data_provider = FLDataProviderFromList(
data_loader.fl_train_set(),
data_loader.fl_eval_set(),
data_loader.fl_test_set(),
global_model_1,
)
assertEqual(data_provider.num_train_users(), data_loader.num_total_users)
# training with using training clients for aggregation training metrics
sync_trainer_1.cfg.report_train_metrics_after_aggregation = True
sync_trainer_1.cfg.use_train_clients_for_aggregation_metrics = True
sync_trainer_1.cfg.report_train_metrics = True
global_model_1, best_metric_1 = sync_trainer_1.train(
data_provider,
metrics_reporter_1,
num_total_users=data_provider.num_train_users(),
distributed_world_size=world_size,
)
torch.manual_seed(1)
# second training instance
global_model_2 = DummyAlphabetFLModel()
metrics_reporter_2 = SimpleMetricReporter()
sync_trainer_2 = create_sync_trainer(
model=global_model_2,
users_per_round=users_per_round,
local_lr=local_optimizer_lr,
epochs=3,
user_epochs_per_round=2,
)
# training with using training clients for aggregation training metrics
sync_trainer_2.cfg.report_train_metrics_after_aggregation = True
sync_trainer_2.cfg.use_train_clients_for_aggregation_metrics = False
sync_trainer_2.cfg.report_train_metrics = True
global_model_2, best_metric_2 = sync_trainer_2.train(
data_provider,
metrics_reporter_2,
num_total_users=data_provider.num_train_users(),
distributed_world_size=world_size,
)
# Check that the reported metrics are different
for batch_metrics_1, batch_metrics_2 in zip(
metrics_reporter_1.batch_metrics, metrics_reporter_2.batch_metrics
):
if (
batch_metrics_1.loss != batch_metrics_2.loss
or batch_metrics_1.num_examples != batch_metrics_2.num_examples
):
return
assert True, "Batch metrics same whether using training or random clients"
def test_one_user_sequential_user_equivalent(self) -> None:
"""
        test equivalence of the following scenarios:
        1. one user who gets all the examples, and BS = all examples,
local optimizer LR = 1.0
2. number of users = number of examples, and one example per user,
users per round = all users, use FedAvg
"""
torch.manual_seed(1)
# create dummy FL model on alphabet
global_model = DummyAlphabetFLModel()
global_model_init = FLModelParamUtils.clone(global_model)
# will be used later to verify training indeed took place
global_model_init_copy = FLModelParamUtils.clone(global_model)
metrics_reporter = FakeMetricReporter()
num_training_examples = 32
# one user, who got 32 examples
shard_size = num_training_examples
local_batch_size = num_training_examples
world_size = 1
dummy_dataset = DummyAlphabetDataset(num_rows=num_training_examples)
(
data_provider,
data_loader,
) = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, global_model
)
assertEqual(
data_loader.num_total_users, math.ceil(num_training_examples / shard_size)
)
users_per_round = 1
local_optimizer_lr = 1.0
epochs = 5
torch.manual_seed(1)
sync_trainer = create_sync_trainer(
model=global_model,
local_lr=local_optimizer_lr,
users_per_round=users_per_round,
epochs=epochs,
)
one_user_global_model, _eval_metric_one_user = sync_trainer.train(
data_provider,
metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=world_size,
)
metrics_reporter.reset()
# 32 users, one example each
shard_size = 1
local_batch_size = 1
world_size = 1
dummy_dataset = DummyAlphabetDataset(num_rows=num_training_examples)
(
data_provider,
data_loader,
) = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, global_model
)
assertEqual(
data_loader.num_total_users, math.ceil(num_training_examples / shard_size)
)
assertEqual(data_provider.num_train_users(), data_loader.num_total_users)
# select all users to train in each round
users_per_round = data_provider.num_train_users()
torch.manual_seed(1)
sync_trainer = create_sync_trainer(
model=global_model_init,
local_lr=local_optimizer_lr,
users_per_round=users_per_round,
epochs=epochs,
)
all_users_global_model, _eval_metric_all_user = sync_trainer.train(
data_provider,
metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=world_size,
)
assertEqual(
verify_models_equivalent_after_training(
one_user_global_model,
all_users_global_model,
global_model_init_copy,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
),
"",
)
def test_training_with_armijo_line_search(self) -> None:
"""
test Armijo line-search for local LR scheduling
using shrinking factor = 1.0 == constant LR
"""
torch.manual_seed(1)
# create dummy FL model on alphabet
global_model = DummyAlphabetFLModel()
global_model_init = FLModelParamUtils.clone(global_model)
# will be used later to verify training indeed took place
global_model_init_copy = FLModelParamUtils.clone(global_model)
metrics_reporter = FakeMetricReporter()
# one user, who got 26 examples
shard_size = 26
local_batch_size = 26
world_size = 1
dummy_dataset = DummyAlphabetDataset()
(
data_provider,
data_loader,
) = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, global_model
)
assertEqual(data_loader.num_total_users, math.ceil(26 / shard_size))
assertEqual(data_provider.num_train_users(), data_loader.num_total_users)
users_per_round = 1
local_optimizer_lr = 1.0
epochs = 5
torch.manual_seed(1)
sync_trainer = create_sync_trainer(
model=global_model,
local_lr=local_optimizer_lr,
users_per_round=users_per_round,
epochs=epochs,
)
constant_lr_model, _ = sync_trainer.train(
data_provider,
metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=world_size,
)
metrics_reporter.reset()
torch.manual_seed(1)
# training with Armijo line-search LR scheduler with
# shrinking factor = 1.0 (no shrinking)
local_lr_scheduler_config = ArmijoLineSearchSchedulerConfig(
shrinking_factor=1.0
)
sync_trainer_with_scheduler = create_sync_trainer(
model=global_model_init,
local_lr=local_optimizer_lr,
users_per_round=users_per_round,
epochs=epochs,
local_lr_scheduler=local_lr_scheduler_config,
)
armijo_ls_model, _ = sync_trainer_with_scheduler.train(
data_provider,
metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=world_size,
)
metrics_reporter.reset()
assertEqual(
verify_models_equivalent_after_training(
constant_lr_model,
armijo_ls_model,
global_model_init_copy,
rel_epsilon=1e-6,
abs_epsilon=1e-6,
),
"",
)
def _test_fl_nonfl_equivalent(
self,
num_examples: int,
num_fl_users: int,
epochs: int,
local_lr: float,
server_config: SyncServerConfig,
) -> None:
"""
Given:
data_for_fl={user1:batch1, user2:batch2}
data_for_non_fl={batch1, batch2}
Check that the following produce the same trained model:
1. FL training, 1 user per round. global_opt=SGD, local_lr=x, global_lr=x
2. Non-FL training, opt=SGD, lr=x
"""
torch.manual_seed(1)
# create dummy FL model on alphabet
global_model = DummyAlphabetFLModel()
# will be used later to verify training indeed took place
global_model_init_copy = FLModelParamUtils.clone(global_model)
# num_fl_users users, each with num_examples/num_fl_users training examples
assertTrue(
num_examples % num_fl_users == 0,
f"Expect num_examples({num_examples}) to be multiple of num_fl_users({num_fl_users})",
)
shard_size = num_examples // num_fl_users
batch_size = num_examples // num_fl_users
world_size = 1
dummy_dataset = DummyAlphabetDataset(num_examples)
(
data_provider,
data_loader,
) = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, batch_size, global_model
)
assertEqual(data_loader.num_total_users, math.ceil(num_examples / shard_size))
assertEqual(data_provider.num_train_users(), data_loader.num_total_users)
torch.manual_seed(1)
sync_trainer = create_sync_trainer(
model=global_model,
local_lr=local_lr,
users_per_round=1,
epochs=epochs,
do_eval=False,
server_config=server_config,
)
fl_model, _ = sync_trainer.train(
data_provider,
metrics_reporter=FakeMetricReporter(),
num_total_users=data_provider.num_train_users(),
distributed_world_size=world_size,
)
# non-FL training
dummy_dataset = DummyAlphabetDataset(num_examples)
data_loader = torch.utils.data.DataLoader(
dummy_dataset, batch_size=batch_size, shuffle=False
)
nonfl_model = FLModelParamUtils.clone(global_model_init_copy)
optimizer = instantiate(
config=server_config.server_optimizer,
model=nonfl_model.fl_get_module(),
)
FLTestUtils.run_nonfl_training(
model=nonfl_model,
optimizer=optimizer,
data_loader=data_loader,
epochs=epochs,
)
error_msg = verify_models_equivalent_after_training(
fl_model,
nonfl_model,
global_model_init_copy,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
)
assertEmpty(error_msg, msg=error_msg)
def test_fl_nonfl_equivalent_global_optimizer_sgd(self) -> None:
"""
Given:
batch_size=13
data_for_fl={user1:batch1, user2:batch2}
data_for_non_fl={batch1, batch2}
Check that the following produce the same trained model:
1. FL training, 1 user per round. global_opt=SGD, local_lr=x, global_lr=x
2. Non-FL training, opt=SGD, lr=x
"""
self._test_fl_nonfl_equivalent(
num_examples=26,
num_fl_users=2,
epochs=5,
local_lr=1.0,
server_config=SyncServerConfig(
server_optimizer=FedAvgWithLROptimizerConfig(lr=1.0, momentum=0.0),
active_user_selector=SequentialActiveUserSelectorConfig(),
),
)
def test_fl_nonfl_equivalent_global_optimizer_adam(self) -> None:
"""
Given:
batch_size=16 (bs=num_examples/num_fl_users)
data_for_fl={user1:batch1, user2:batch2}
data_for_non_fl={batch1, batch2}
Check that the following produce the same trained model:
1. FL training, 1 user per round. GlobalOpt=Adam, local_lr=1.0, global_lr=x
2. Non-FL training, opt=Adam, lr=x
"""
self._test_fl_nonfl_equivalent(
num_examples=32,
num_fl_users=2,
epochs=5,
local_lr=1.0,
server_config=SyncServerConfig(
server_optimizer=FedAdamOptimizerConfig(lr=0.001, eps=1e-2),
active_user_selector=SequentialActiveUserSelectorConfig(),
),
)
def test_client_overselection(self) -> None:
"""
test client overselection by equivalence of the two setups:
1. two users in the population, user 0 has two more examples than user 1.
UPR = 1, dropout_rate = 0.5. Hence (1/0.5=2) users will be over-selected.
simulate training time with mean = 1.0, std = 0.0 per example.
Hence user 0 (having more examples, and thus a longer simulated training
time) will always be dropped. Trains for 1 FL epoch; rounds per
epoch = (num users / UPR = 2), hence user 1 will be trained twice.
2. single user in the population, which is user 1 from the setting above.
UPR = 1, dropout = 1.0. two FL epochs.
"""
torch.manual_seed(1)
# create dummy FL model on alphabet
global_model = DummyAlphabetFLModel()
# keep a copy of initial model to trigger another training instance
global_model_init = FLModelParamUtils.clone(global_model)
# dummy alphabet dataset
dummy_dataset = DummyAlphabetDataset()
# two users; user 0 gets two more examples than user 1 (14 vs 12)
shard_size = len(dummy_dataset) // 2 + 1
local_batch_size = 2
(
data_provider,
data_loader,
) = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, global_model
)
assertEqual(data_provider.num_train_users(), data_loader.num_total_users)
# assert first user gets (dummy_dataset.num_rows / 2) + 1 data point,
# the second user gets (dummy_dataset.num_rows / 2) - 1 data point
assertEqual(
data_provider.get_train_user(0).num_train_examples(),
dummy_dataset.num_rows / 2 + 1,
)
assertEqual(
data_provider.get_train_user(1).num_train_examples(),
dummy_dataset.num_rows / 2 - 1,
)
# shared trainer config between the two training instances
users_per_round = 1
local_optimizer_lr = 1.0
epochs = 1
torch.manual_seed(1)
# training with overselection
dropout_rate = 0.5
sync_trainer_overselection = create_sync_trainer(
model=global_model,
local_lr=local_optimizer_lr,
users_per_round=users_per_round,
epochs=epochs,
dropout_rate=dropout_rate,
timeout_simulator_config=GaussianTimeOutSimulatorConfig(
duration_distribution_generator=PerExampleGaussianDurationDistributionConfig(
training_duration_mean=1.0, training_duration_sd=0.0
),
timeout_wall_per_round=999999,
fl_stopping_time=9999999,
),
)
model_with_overselection, _ = sync_trainer_overselection.train(
data_provider,
metrics_reporter=FakeMetricReporter(),
num_total_users=data_provider.num_train_users(),
distributed_world_size=1,
)
# another training instance: only user[1] in the user population
# removing user 0 from dataset, assign user 1 to be user 0
data_provider._train_users[0] = copy.deepcopy(data_provider._train_users[1])
data_provider._train_users.pop(1)
# only a single user after remove user 0
assertEqual(data_provider.num_train_users(), 1)
global_model = FLModelParamUtils.clone(global_model_init)
torch.manual_seed(1)
dropout_rate = 1.0
epochs = 2
sync_trainer_single_user = create_sync_trainer(
model=global_model,
local_lr=local_optimizer_lr,
users_per_round=users_per_round,
epochs=epochs,
dropout_rate=dropout_rate,
)
model_single_user, _ = sync_trainer_single_user.train(
data_provider,
metrics_reporter=FakeMetricReporter(),
num_total_users=data_provider.num_train_users(),
distributed_world_size=1,
)
assertEqual(
verify_models_equivalent_after_training(
model_with_overselection,
model_single_user,
global_model_init,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
),
"",
)
def test_partial_update_from_clients(self) -> None:
"""
test the equivalence of these two training instances
1. UPR=1. User dataset has 52 characters [a,b,c,d .... x, y ,z, a, b, c... x,y,z],
where each character appears twice in order. Timeout limit is set to
just enough for training half of the dataset
2. UPR=1. User dataset has 26 characters [a,b,c,d ...,x,y,z]. no timeout
"""
torch.manual_seed(1)
# create dummy FL model on alphabet
global_model = DummyAlphabetFLModel()
# keep a copy of initial model to trigger another training instance
global_model_init = FLModelParamUtils.clone(global_model)
# dummy alphabet dataset, getting each character twice
num_rows = 52
dummy_dataset = DummyAlphabetDataset(num_rows)
# a single user getting all examples
shard_size = len(dummy_dataset)
local_batch_size = 2
(
data_provider,
data_loader,
) = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, global_model
)
# shared trainer config between the two training instances
users_per_round = 1
local_optimizer_lr = 1.0
epochs = 1
torch.manual_seed(1)
# training time just enough for 26 examples, although user has
# 52 examples
sync_trainer_timeout = create_sync_trainer(
model=FLModelParamUtils.clone(global_model),
local_lr=local_optimizer_lr,
users_per_round=users_per_round,
epochs=epochs,
timeout_simulator_config=GaussianTimeOutSimulatorConfig(
duration_distribution_generator=PerExampleGaussianDurationDistributionConfig(
training_duration_mean=1.0, training_duration_sd=0.0
),
timeout_wall_per_round=26,
fl_stopping_time=9999999,
),
)
model_with_timeout, _ = sync_trainer_timeout.train(
data_provider,
metrics_reporter=FakeMetricReporter(),
num_total_users=data_provider.num_train_users(),
distributed_world_size=1,
)
# dummy alphabet dataset, getting each character once
num_rows = 26
torch.manual_seed(1)
dummy_dataset = DummyAlphabetDataset(num_rows)
# a single user getting all examples
shard_size = len(dummy_dataset)
local_batch_size = 2
(
data_provider,
data_loader,
) = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, global_model
)
torch.manual_seed(1)
# training time just enough for 26 examples
sync_trainer_timeout = create_sync_trainer(
model=FLModelParamUtils.clone(global_model),
local_lr=local_optimizer_lr,
users_per_round=users_per_round,
epochs=epochs,
)
model_no_timeout, _ = sync_trainer_timeout.train(
data_provider,
metrics_reporter=FakeMetricReporter(),
num_total_users=data_provider.num_train_users(),
distributed_world_size=1,
)
assertEqual(
verify_models_equivalent_after_training(
model_with_timeout,
model_no_timeout,
global_model_init,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
),
"",
)
def test_client_metric_reporting(self) -> None:
"""
Test that per-client metrics are reported once every
``client_metrics_reported_per_epoch`` epochs, as defined in the config.
"""
torch.manual_seed(1)
# create dummy FL model on alphabet
global_model = DummyAlphabetFLModel()
# dummy alphabet dataset, getting each character twice
num_rows = 52
num_users = 4
dummy_dataset = DummyAlphabetDataset(num_rows)
# num_users users, each getting an equal shard of the examples
shard_size = int(len(dummy_dataset) / num_users)
local_batch_size = 2
(
data_provider,
data_loader,
) = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, global_model
)
users_per_round = 1
local_optimizer_lr = 1.0
momentum = 0.9
epochs = 6
sync_trainer_with_client_reports = SyncTrainer(
model=global_model,
cuda_enabled=False,
**OmegaConf.structured(
SyncTrainerConfig(
epochs=epochs,
do_eval=True,
users_per_round=users_per_round,
always_keep_trained_model=False,
train_metrics_reported_per_epoch=1,
eval_epoch_frequency=1,
report_train_metrics=False,
report_train_metrics_after_aggregation=True,
client=ClientConfig(
epochs=1,
optimizer=LocalOptimizerSGDConfig(
lr=local_optimizer_lr, momentum=momentum
),
max_clip_norm_normalized=None,
),
server=SyncServerConfig(
server_optimizer=FedAvgWithLROptimizerConfig(
lr=1.0, momentum=0.0
),
active_user_selector=SequentialActiveUserSelectorConfig(),
),
report_client_metrics=True,
report_client_metrics_after_epoch=True,
client_metrics_reported_per_epoch=3,
)
),
)
metrics_reporter = MetricsReporterWithMockedChannels()
model, _ = sync_trainer_with_client_reports.train(
data_provider,
metrics_reporter=metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=1,
)
def count_word(result, word):
return str(result).count(word)
# If client_metrics_reported_per_epoch = 3 and number of epochs = 6
# per client eval metrics should be reported twice.
assertEqual(
count_word(metrics_reporter.stdout_results, "Per_Client_Eval"),
2,
metrics_reporter.stdout_results,
)
def _get_tensorboard_results_from_training(
self, num_total_users: int, num_epochs: int, users_per_round: int
) -> List[MockRecord]:
# dataset has 26 rows
assertTrue(num_total_users <= 26, "Can't have more than 26 users")
torch.manual_seed(1)
# 26 rows in data
shard_size = int(math.ceil(26 / num_total_users))
local_batch_size = 4
dummy_dataset = DummyAlphabetDataset()
global_model = DummyAlphabetFLModel()
(
data_provider,
data_loader,
) = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset,
examples_per_user=shard_size,
batch_size=local_batch_size,
model=global_model,
)
assertEqual(data_provider.num_train_users(), data_loader.num_total_users)
torch.manual_seed(1)
metrics_reporter = MetricsReporterWithMockedChannels()
sync_trainer = create_sync_trainer(
model=global_model,
local_lr=0.1,
users_per_round=users_per_round,
epochs=num_epochs,
user_epochs_per_round=1,
report_train_metrics=True,
report_train_metrics_after_aggregation=True,
)
global_model, best_metric = sync_trainer.train(
data_provider,
metrics_reporter,
num_total_users=num_total_users,
distributed_world_size=1,
)
return metrics_reporter.tensorboard_results
def test_tensorboard_metrics_reporting_simple(self) -> None:
"""Train with tensorboard metrics reporter for one epoch, 5 rounds per epoch.
Test for 2 things:
a) Train, Aggregation and Eval metrics are reported once
b) Metric reporting happens at the end of the epoch (when global_round=5)
"""
num_total_users = 5
num_epochs = 1
users_per_round = 1
tensorboard_results: List[
MockRecord
] = self._get_tensorboard_results_from_training(
num_total_users=num_total_users,
num_epochs=num_epochs,
users_per_round=users_per_round,
)
# ensure that train, aggregation and eval metrics are all reported once
for stage in [
TrainingStage.TRAINING,
TrainingStage.AGGREGATION,
TrainingStage.EVAL,
]:
tag = f"Loss/{TrainingStage(stage).name.title()}"
num_entries = sum(record.tag == tag for record in tensorboard_results)
# we report once per epoch
assertEqual(num_entries, 1)
# training runs for 1 epoch, or 5 rounds
# when train/eval results are reported, global_round_num should be 5
global_steps_reported_expected = [5, 5, 5]
global_steps_reported_actual = [
record.global_step for record in tensorboard_results
]
assertEqual(
global_steps_reported_actual,
global_steps_reported_expected,
f"Actual global steps: {global_steps_reported_actual}, Expected global steps:{global_steps_reported_expected}",
)
def test_tensorboard_metrics_reporting_complex(self) -> None:
"""Train with tensorboard metrics reporter. Ensure eval and train metrics are
correctly reported to tensorboard
"""
# TODO: run with different values of num_epochs, num_total_users, users_per_round,
# train_metrics_reported_per_epoch, eval_epoch_frequency
num_total_users = 10
num_epochs = 3
users_per_round = 2
tensorboard_results: List[
MockRecord
] = self._get_tensorboard_results_from_training(
num_total_users=num_total_users,
num_epochs=num_epochs,
users_per_round=users_per_round,
)
# ensure that train, aggregation and eval metrics are all reported once per epoch
for stage in [
TrainingStage.TRAINING,
TrainingStage.AGGREGATION,
TrainingStage.EVAL,
]:
tag = f"Loss/{TrainingStage(stage).name.title()}"
num_entries = sum(record.tag == tag for record in tensorboard_results)
# we report once per epoch
assertEqual(num_entries, num_epochs)
rounds_per_epoch = int(math.ceil(num_total_users / users_per_round))
# example. if num_epochs=3, and rounds_per_epoch=5, global_steps_at_epoch_end will be [5, 10, 15]
epochs = list(range(1, num_epochs + 1))
global_steps_at_epoch_end = [e * rounds_per_epoch for e in epochs]
# we report 3 metrics in each epoch: Training, Aggregation and Eval
num_records_in_report = 3
# example. if global_steps_at_epoch_end = [5, 10, 15], and num_records_in_report = 2,
# then global_steps_reported_expected = [5, 5, 10,10, 15,15]
global_steps_reported_expected = [
step
for step in global_steps_at_epoch_end
for k in range(num_records_in_report)
]
global_steps_reported_actual = [
record.global_step for record in tensorboard_results
]
assertEqual(
global_steps_reported_actual,
global_steps_reported_expected,
f"Actual global steps: {global_steps_reported_actual}, Expected global steps:{global_steps_reported_expected}",
)
| canife-main | FLSim/flsim/trainers/tests/test_trainer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Union
import numpy as np
import torch
from flsim.common.pytest_helper import assertEmpty, assertEqual
from flsim.data.data_provider import IFLDataProvider
from flsim.interfaces.model import IFLModel
from flsim.optimizers.async_aggregators import (
AsyncAggregatorConfig,
FedAdamAsyncAggregatorConfig,
FedAdamFedBuffAggregatorConfig,
FedAvgWithLRAsyncAggregatorConfig,
FedAvgWithLRFedBuffAggregatorConfig,
create_optimizer_for_async_aggregator,
)
from flsim.optimizers.server_optimizers import (
FedAdamOptimizerConfig,
FedAvgWithLROptimizerConfig,
OptimizerType,
)
from flsim.servers.sync_servers import SyncServerConfig
from flsim.trainers.async_trainer import AsyncTrainerConfig
from flsim.trainers.sync_trainer import SyncTrainerConfig
from flsim.utils.config_utils import is_target
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.sample_model import DummyAlphabetFLModel
from flsim.utils.test_utils import (
MetricsReporterWithMockedChannels,
verify_models_equivalent_after_training,
)
from flsim.utils.tests.helpers.test_async_trainer_utils import (
create_async_trainer,
create_event_generator_config,
get_safe_global_lr,
run_fl_training,
)
from flsim.utils.tests.helpers.test_data_utils import DummyAlphabetDataset
from flsim.utils.tests.helpers.test_sync_trainer_utils import create_sync_trainer
from flsim.utils.tests.helpers.test_utils import FLTestUtils
class TrainerType(Enum):
SYNC: str = SyncTrainerConfig._target_.split(".")[-1]
ASYNC: str = AsyncTrainerConfig._target_.split(".")[-1]
NONFL: str = "NonFL"
class FedBuffTestUtils:
@staticmethod
def get_data_provider(
num_examples: int,
num_fl_users: int,
examples_per_user: int,
batch_size: int,
model: IFLModel,
) -> IFLDataProvider:
dummy_dataset = DummyAlphabetDataset(num_examples)
(
data_provider,
fl_data_loader,
) = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, examples_per_user, batch_size, model
)
assert fl_data_loader.num_total_users == num_fl_users
return data_provider
@staticmethod
def train_comparable_model(
trainer_to_compare_fedbuff_with,
data_provider,
global_model,
server_config,
local_lr: float,
epochs: int,
training_rate: int,
training_duration_mean: float,
training_duration_sd: float,
) -> IFLModel:
metrics_reporter = MetricsReporterWithMockedChannels()
if trainer_to_compare_fedbuff_with == TrainerType.SYNC:
trainer = create_sync_trainer(
model=global_model,
local_lr=local_lr,
epochs=epochs,
users_per_round=training_rate,
server_config=server_config,
)
model_to_compare, _ = trainer.train(
data_provider=data_provider,
metrics_reporter=metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=1,
)
elif trainer_to_compare_fedbuff_with == TrainerType.ASYNC:
trainer = create_async_trainer(
model=global_model,
local_lr=local_lr,
epochs=epochs,
aggregator_config=server_config,
event_generator_config=create_event_generator_config(
training_rate=training_rate,
training_duration_mean=training_duration_mean,
training_duration_sd=training_duration_sd,
),
)
model_to_compare, _ = trainer.train(
data_provider=data_provider,
metrics_reporter=metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=1,
)
elif trainer_to_compare_fedbuff_with == TrainerType.NONFL:
# create an optimizer from aggregator_config.
# for tests in this file, optimizer will either be
# torch.optim.SGD or torch.optim.Adam
if isinstance(server_config, AsyncAggregatorConfig):
optimizer = create_optimizer_for_async_aggregator(
config=server_config,
model=global_model.fl_get_module(),
)
elif isinstance(server_config, SyncServerConfig):
optimizer = OptimizerType.create_optimizer(
model=global_model.fl_get_module(),
config=server_config.server_optimizer,
)
else:
raise AssertionError(f"Incompatible server config:{server_config}")
model_to_compare, _ = FLTestUtils.train_non_fl(
data_provider=data_provider,
global_model=global_model,
optimizer=optimizer,
metrics_reporter=metrics_reporter,
epochs=epochs,
)
# pyre-fixme[61]: `model_to_compare` may not be initialized here.
return model_to_compare
@staticmethod
def get_fedbuff_aggregator(
aggregator_config, buffer_size, fedbuff_lr
) -> Union[FedAdamFedBuffAggregatorConfig, FedAvgWithLRFedBuffAggregatorConfig]:
if isinstance(aggregator_config, AsyncAggregatorConfig):
if "FedAdam" in aggregator_config._target_:
fedbuff_aggregator = FedAdamFedBuffAggregatorConfig(
lr=fedbuff_lr,
# pyre-ignore[16]
weight_decay=aggregator_config.weight_decay,
# pyre-ignore[16]
eps=aggregator_config.eps,
buffer_size=buffer_size,
)
else: # "FedAvgWithLR" in aggregator_config._target_:
fedbuff_aggregator = FedAvgWithLRFedBuffAggregatorConfig(
lr=fedbuff_lr,
# pyre-ignore[16]
momentum=aggregator_config.momentum,
buffer_size=buffer_size,
)
return fedbuff_aggregator
elif isinstance(aggregator_config, SyncServerConfig):
if is_target(aggregator_config.server_optimizer, FedAdamOptimizerConfig):
fedbuff_aggregator = FedAdamFedBuffAggregatorConfig(
lr=fedbuff_lr,
# pyre-ignore[16]
weight_decay=aggregator_config.server_optimizer.weight_decay,
# pyre-ignore[16]
eps=aggregator_config.server_optimizer.eps,
buffer_size=buffer_size,
)
else:
fedbuff_aggregator = FedAvgWithLRFedBuffAggregatorConfig(
lr=fedbuff_lr,
# pyre-ignore[16]
momentum=aggregator_config.server_optimizer.momentum,
buffer_size=buffer_size,
)
return fedbuff_aggregator
else:
raise ValueError("Invalid config", aggregator_config)
@staticmethod
def compare_fedbuff_same(
trainer_to_compare_fedbuff_with,
trainer_to_compare_aggregator_config,
fedbuff_aggregator_config,
base_local_lr: float,
fedbuff_local_lr: float,
epochs: int,
num_examples: int,
num_fl_users: int,
batch_size: int,
examples_per_user: int,
buffer_size,
training_rate: int,
training_duration_mean: float,
training_duration_sd: float,
) -> str:
# we need to make three copies:
# to train the model we want to compare with
global_model = DummyAlphabetFLModel()
# to train a model with FedBuff
global_model_fedbuff_copy = FLModelParamUtils.clone(global_model)
# to verify training indeed took place
global_model_init_copy = FLModelParamUtils.clone(global_model)
data_provider = FedBuffTestUtils.get_data_provider(
num_examples=num_examples,
num_fl_users=num_fl_users,
examples_per_user=examples_per_user,
batch_size=batch_size,
model=global_model,
)
def get_base_trained_model():
model_to_compare = FedBuffTestUtils.train_comparable_model(
trainer_to_compare_fedbuff_with=trainer_to_compare_fedbuff_with,
data_provider=data_provider,
global_model=global_model,
server_config=trainer_to_compare_aggregator_config,
epochs=epochs,
local_lr=base_local_lr,
training_rate=training_rate,
training_duration_mean=training_duration_mean,
training_duration_sd=training_duration_sd,
)
return model_to_compare
def get_fedbuff_trained_model():
fedbuff_trainer = create_async_trainer(
model=global_model_fedbuff_copy,
local_lr=fedbuff_local_lr,
epochs=epochs,
event_generator_config=create_event_generator_config(
training_rate=training_rate,
training_duration_mean=training_duration_mean,
training_duration_sd=training_duration_sd,
),
aggregator_config=fedbuff_aggregator_config,
)
fedbuff_model, _ = fedbuff_trainer.train(
data_provider=data_provider,
metrics_reporter=MetricsReporterWithMockedChannels(),
num_total_users=data_provider.num_train_users(),
distributed_world_size=1,
)
return fedbuff_model
fedbuff_trained_model = get_fedbuff_trained_model()
base_model = get_base_trained_model()
error_msg = verify_models_equivalent_after_training(
base_model,
fedbuff_trained_model,
global_model_init_copy,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
)
return error_msg
@staticmethod
def get_data_params(
min_num_users, max_num_users, min_examples_per_user, max_examples_per_user
):
r"""
Generate data parameters for FL training with
num users in range [min_num_users, max_num_users)
"""
num_fl_users = np.random.randint(min_num_users, max_num_users)
examples_per_user = np.random.randint(
min_examples_per_user, max_examples_per_user
)
num_examples = examples_per_user * num_fl_users
# num_fl_users + 1 because randint upper bound is exclusive
training_rate = np.random.randint(min_num_users, num_fl_users + 1)
return num_fl_users, examples_per_user, num_examples, training_rate
@staticmethod
def compare_nonfl_fedbuff_uneven_data_split(
total_examples: int,
num_fl_users: int,
buffer_size,
non_fl_lr,
fedbuff_global_lr,
examples_per_user_fedbuff: int,
examples_per_user_nonfl: int,
batch_size_fedbuff: int,
batch_size_nonfl: int,
epochs: int,
local_lr: float = 1.0,
) -> str:
# to verify training indeed took place
reference_untrained_model = DummyAlphabetFLModel()
# to train a model with FedBuff
fedbuff_model = FLModelParamUtils.clone(reference_untrained_model)
# to train nonfl
nonfl_model = FLModelParamUtils.clone(reference_untrained_model)
nonfl_data_provider = FedBuffTestUtils.get_data_provider(
num_examples=total_examples,
num_fl_users=1,
examples_per_user=examples_per_user_nonfl,
batch_size=batch_size_nonfl,
model=nonfl_model,
)
fedbuff_data_provider = FedBuffTestUtils.get_data_provider(
num_examples=total_examples,
num_fl_users=num_fl_users,
examples_per_user=examples_per_user_fedbuff,
batch_size=batch_size_fedbuff,
model=fedbuff_model,
)
fedbuff_trained_model, _ = run_fl_training(
fl_model=fedbuff_model,
fl_data_provider=fedbuff_data_provider,
epochs=epochs,
local_lr=local_lr,
aggregator_config=FedAvgWithLRFedBuffAggregatorConfig(
lr=fedbuff_global_lr, buffer_size=buffer_size
),
# sequential training: training_rate >> training_duration
training_rate=1,
training_duration_mean=0,
)
optimizer = torch.optim.SGD(
nonfl_model.fl_get_module().parameters(), lr=non_fl_lr
)
nonfl_trained_model, _ = FLTestUtils.train_non_fl(
data_provider=nonfl_data_provider,
global_model=nonfl_model,
optimizer=optimizer,
epochs=epochs,
)
print(
f"Local LR {local_lr} Non FL LR {non_fl_lr} FedBuff LR {fedbuff_global_lr}"
)
return verify_models_equivalent_after_training(
nonfl_trained_model,
fedbuff_trained_model,
reference_untrained_model,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
)
@staticmethod
def compare_async_fedbuff_uneven_data_split(
total_examples: int,
num_fl_users: int,
fedbuff_num_fl_users,
async_global_lr,
fedbuff_global_lr,
batch_size_fedbuff: int,
batch_size_async: int,
epochs: int,
training_rate: int,
training_duration_mean: float,
local_lr: float = 1.0,
) -> str:
# to verify training indeed took place
reference_untrained_model = DummyAlphabetFLModel()
# to train a model with FedBuff
fedbuff_model = FLModelParamUtils.clone(reference_untrained_model)
# to train async
async_model = FLModelParamUtils.clone(reference_untrained_model)
async_data_provider = FedBuffTestUtils.get_data_provider(
num_examples=total_examples,
num_fl_users=1,
examples_per_user=total_examples,
batch_size=batch_size_async,
model=async_model,
)
async_trained_model, _ = run_fl_training(
fl_model=async_model,
fl_data_provider=async_data_provider,
epochs=epochs,
local_lr=local_lr,
aggregator_config=FedAvgWithLRAsyncAggregatorConfig(lr=async_global_lr),
training_rate=training_rate,
training_duration_mean=training_duration_mean,
)
fedbuff_data_provider = FedBuffTestUtils.get_data_provider(
num_examples=total_examples,
num_fl_users=num_fl_users,
examples_per_user=total_examples // num_fl_users,
batch_size=batch_size_fedbuff,
model=fedbuff_model,
)
fedbuff_trained_model, _ = run_fl_training(
fl_model=fedbuff_model,
fl_data_provider=fedbuff_data_provider,
epochs=epochs,
local_lr=local_lr,
aggregator_config=FedAvgWithLRFedBuffAggregatorConfig(
lr=fedbuff_global_lr, buffer_size=fedbuff_num_fl_users
),
training_rate=training_rate,
training_duration_mean=training_duration_mean,
)
print(
f"Local LR {local_lr} Async LR {async_global_lr} FedBuff LR {fedbuff_global_lr}"
)
return verify_models_equivalent_after_training(
async_trained_model,
fedbuff_trained_model,
reference_untrained_model,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
)
class TestFedBuff:
@classmethod
def setup_class(cls) -> None:
np.random.seed(0)
def test_async_fedbuff_same_multiple_clients_to_sync(self) -> None:
r"""
Test when FedBuff has multiple clients to sync
1. Training in parallel. Meaning, both mechanisms start from the same global model (training_time >> training rate)
2. Total examples is the same for both and local lr is 1.0
3. Pure Async 1 user with 10 examples should be the same as FedBuff 10 users each with 1 example
"""
# training in parallel training_duration >> training_rate
training_rate = 1
training_duration_mean = training_rate * 100
total_examples = 10
num_fl_users = 10
batch_size_fedbuff = 1
batch_size_async = total_examples
buffer_size = num_fl_users
local_lr = np.random.sample()
async_global_lr = np.random.sample()
fedbuff_global_lr = async_global_lr / buffer_size
error_msg = FedBuffTestUtils.compare_async_fedbuff_uneven_data_split(
total_examples=total_examples,
num_fl_users=num_fl_users,
fedbuff_num_fl_users=buffer_size,
async_global_lr=async_global_lr,
fedbuff_global_lr=fedbuff_global_lr,
batch_size_fedbuff=batch_size_fedbuff,
batch_size_async=batch_size_async,
epochs=5,
training_rate=training_rate,
training_duration_mean=training_duration_mean,
local_lr=local_lr,
)
assertEmpty(error_msg, msg=error_msg)
def test_non_fl_fedbuff_same_multiple_clients_to_sync(self) -> None:
"""
Test nonFL and FedBuff same with multiple clients to sync
Both mechanisms, trained on the same total number of examples, yield the same result if
1. FedBuff is trained sequentially (training rate >> training duration)
2. NonFL has 1 user with 10 examples and FedBuff has 10 users each with 1 example trained for 1 epoch
3. Local_lr = some random value, global_lr = some random value
4. fedbuff_global_lr = global_lr / (buffer_size * local_lr)
"""
total_examples = 10
num_fl_users = 10
examples_per_user_fedbuff = 1
batch_size_fedbuff = 1
batch_size_nonfl = total_examples
buffer_size = 10
nonfl_lr = np.random.sample()
fedbuff_local_lr = np.random.sample()
fedbuff_global_lr = nonfl_lr / (buffer_size * fedbuff_local_lr)
error_msg = FedBuffTestUtils.compare_nonfl_fedbuff_uneven_data_split(
total_examples=total_examples,
num_fl_users=num_fl_users,
examples_per_user_fedbuff=examples_per_user_fedbuff,
examples_per_user_nonfl=total_examples,
batch_size_fedbuff=batch_size_fedbuff,
batch_size_nonfl=batch_size_nonfl,
buffer_size=buffer_size,
non_fl_lr=nonfl_lr,
fedbuff_global_lr=fedbuff_global_lr,
local_lr=fedbuff_local_lr,
epochs=1,
)
assertEmpty(error_msg, msg=error_msg)
def test_async_fedbuff_same_sync_every_client(self) -> None:
r"""
FedBuff and Async should yield the same model:
1. Async and FedBuff training duration distribution are the same
training_duration_sd should be small
2. FedBuff update global model every 1 client
"""
# random local lr in [0, 1) and global lr in [0, 10)
local_lr = np.random.sample()
global_lr = np.random.sample() * 10
buffer_size = 1
for base_aggregator_config in [
FedAvgWithLRAsyncAggregatorConfig(lr=global_lr),
FedAdamAsyncAggregatorConfig(lr=global_lr, eps=1e-2),
]:
for batch_size in [4, 16, 32]:
(
num_fl_users,
examples_per_user,
num_examples,
training_rate,
) = FedBuffTestUtils.get_data_params(
min_num_users=2,
max_num_users=10,
min_examples_per_user=1,
max_examples_per_user=10,
)
error_msg = FedBuffTestUtils.compare_fedbuff_same(
trainer_to_compare_fedbuff_with=TrainerType.ASYNC,
trainer_to_compare_aggregator_config=base_aggregator_config,
fedbuff_aggregator_config=FedBuffTestUtils.get_fedbuff_aggregator(
aggregator_config=base_aggregator_config,
buffer_size=buffer_size,
fedbuff_lr=global_lr,
),
fedbuff_local_lr=local_lr,
base_local_lr=local_lr,
epochs=5,
num_examples=num_examples,
num_fl_users=num_fl_users,
batch_size=batch_size,
examples_per_user=examples_per_user,
buffer_size=buffer_size,
training_rate=training_rate,
training_duration_mean=1,
training_duration_sd=0,
)
assertEqual(error_msg, "")
def test_nonfl_fedbuff_same_sgd(self) -> None:
r"""
FedBuff and NonFL should yield the same model:
1. FedBuff's training rate = 1, training duration ~ N(0, 0)
2. Round robin user selector
3. FedBuff takes global step every 1 clients
"""
(
num_fl_users,
examples_per_user,
num_examples,
_,
) = FedBuffTestUtils.get_data_params(
min_num_users=1,
max_num_users=10,
min_examples_per_user=1,
max_examples_per_user=10,
)
local_lr = 1.0
buffer_size = 1
for batch_size in [4, 16, 32]:
global_lr = get_safe_global_lr(batch_size, examples_per_user)
base_aggregator_config = SyncServerConfig(
server_optimizer=FedAvgWithLROptimizerConfig(lr=global_lr, momentum=0.0)
)
error = FedBuffTestUtils.compare_fedbuff_same(
trainer_to_compare_fedbuff_with=TrainerType.NONFL,
trainer_to_compare_aggregator_config=base_aggregator_config,
fedbuff_aggregator_config=FedBuffTestUtils.get_fedbuff_aggregator(
aggregator_config=base_aggregator_config,
buffer_size=buffer_size,
fedbuff_lr=global_lr,
),
fedbuff_local_lr=local_lr,
base_local_lr=local_lr,
epochs=1,
num_examples=num_examples,
num_fl_users=num_fl_users,
batch_size=batch_size,
examples_per_user=examples_per_user,
buffer_size=1,
training_rate=1,
training_duration_mean=0,
training_duration_sd=0,
)
assertEqual(error, "")
def test_nonfl_fedbuff_same_adam(self) -> None:
r"""
FedBuff and NonFL should yield the same model:
1. FedBuff's training rate = 1, training duration ~ N(0, 0)
2. Round robin user selector
3. FedBuff takes global step every 1 clients
"""
(
num_fl_users,
examples_per_user,
num_examples,
_,
) = FedBuffTestUtils.get_data_params(
min_num_users=1,
max_num_users=10,
min_examples_per_user=1,
max_examples_per_user=10,
)
local_lr = 1.0
global_lr = np.random.sample() * 0.01
base_aggregator_config = SyncServerConfig(
server_optimizer=FedAdamOptimizerConfig(lr=global_lr, eps=1e-2)
)
buffer_size = 1
error = FedBuffTestUtils.compare_fedbuff_same(
trainer_to_compare_fedbuff_with=TrainerType.NONFL,
trainer_to_compare_aggregator_config=base_aggregator_config,
fedbuff_aggregator_config=FedBuffTestUtils.get_fedbuff_aggregator(
aggregator_config=base_aggregator_config,
buffer_size=buffer_size,
fedbuff_lr=global_lr,
),
fedbuff_local_lr=local_lr,
base_local_lr=local_lr,
epochs=5,
num_examples=num_examples,
num_fl_users=num_fl_users,
batch_size=examples_per_user,
examples_per_user=examples_per_user,
buffer_size=1,
training_rate=1,
training_duration_mean=0,
training_duration_sd=0,
)
assertEqual(error, "")
def test_sync_fedbuff_same_sgd(self) -> None:
r"""
FedBuff and Sync should yield the same model:
1. For simplification, assume training_rate = total_users.
Without this constraint, FedBuff==Sync only for 1 round of sync.
2. `(Training_rate*num_users) << training_duration_mean`.
E.g, `#users=10, training rate = 1, training_duration_mean > 10`.
This is needed so that all users train in parallel.
In particular, in a single epoch, every user starts with the same initial model
3. Round robin user selector
4. Set fedbuff_global_lr = sync_global_lr / num_clients_sync
"""
(
num_fl_users,
examples_per_user,
num_examples,
_,
) = FedBuffTestUtils.get_data_params(
min_num_users=1,
max_num_users=5,
min_examples_per_user=1,
max_examples_per_user=5,
)
local_lr = 1.0
global_lr = 1.0
base_aggregator_config = SyncServerConfig(
server_optimizer=FedAvgWithLROptimizerConfig(lr=global_lr, momentum=0.0)
)
buffer_size = num_fl_users
for batch_size in [4, 16, 32]:
print(f"{num_fl_users} {examples_per_user} {local_lr} {global_lr}")
error = FedBuffTestUtils.compare_fedbuff_same(
trainer_to_compare_fedbuff_with=TrainerType.SYNC,
trainer_to_compare_aggregator_config=base_aggregator_config,
fedbuff_aggregator_config=FedBuffTestUtils.get_fedbuff_aggregator(
aggregator_config=base_aggregator_config,
buffer_size=buffer_size,
fedbuff_lr=global_lr / buffer_size,
),
fedbuff_local_lr=local_lr,
base_local_lr=local_lr,
epochs=1,
num_examples=num_examples,
num_fl_users=num_fl_users,
batch_size=batch_size,
examples_per_user=examples_per_user,
buffer_size=buffer_size,
training_rate=buffer_size,
training_duration_mean=buffer_size * 2,
training_duration_sd=0,
)
assertEqual(error, "")
def test_sync_fedbuff_same_adam(self) -> None:
r"""
For sync == FedBuff adam,
`fedbuff_local_lr = sync_local_lr / buffer_size`
and FedBuff and sync global lr's should be the same.
Sync and FedBuff compute different "delta"
fedbuff_delta = sync_delta * buffer_size
to fix this difference, for SGD, we set `global_lr_fedbuff = global_lr_sync/buffer_size`
however, for Adam, this normalization doesn't work:
Since Adam stores first and second moments (mean and variance) of deltas.
In particular, the following are not equivalent:
delta=d, lr=l
delta=d*k, lr=l/k
instead, we have to set `local_lr_fedbuff = local_lr_sync / buffer_size`
"""
(
num_fl_users,
examples_per_user,
num_examples,
_,
) = FedBuffTestUtils.get_data_params(
min_num_users=1,
max_num_users=10,
min_examples_per_user=1,
max_examples_per_user=10,
)
local_lr = 1.0
global_lr = np.random.sample() * 0.01
base_aggregator_config = SyncServerConfig(
server_optimizer=FedAdamOptimizerConfig(lr=global_lr, eps=1e-2)
)
buffer_size = num_fl_users
error = FedBuffTestUtils.compare_fedbuff_same(
trainer_to_compare_fedbuff_with=TrainerType.SYNC,
trainer_to_compare_aggregator_config=base_aggregator_config,
fedbuff_aggregator_config=FedBuffTestUtils.get_fedbuff_aggregator(
aggregator_config=base_aggregator_config,
buffer_size=buffer_size,
fedbuff_lr=global_lr,
),
fedbuff_local_lr=local_lr / num_fl_users,
base_local_lr=local_lr,
epochs=1,
num_examples=num_examples,
num_fl_users=num_fl_users,
batch_size=examples_per_user,
examples_per_user=examples_per_user,
buffer_size=buffer_size,
training_rate=buffer_size,
training_duration_mean=buffer_size * 100,
training_duration_sd=0,
)
print(f"{num_fl_users} {examples_per_user} {local_lr} {global_lr}")
assertEqual(error, "")
def test_partial_model_update(self) -> None:
r"""
Test for partial update
Assume we have 2 users (user1 and user2), buffer_size = 2, and both nonfl and fl training have
exactly one batch training sequentially (training_rate >> training_duration), and denote initial global model as g0
with the following timeline
user1 starts training (receives g0)
user1 ends training
user2 starts training (note: user2 should receive g0)
user2 ends training
this sequence should produce the same model as non-fl training on user1 + user2 datasets
"""
num_fl_users = 2
total_examples = 20
examples_per_user_fedbuff = total_examples // num_fl_users
buffer_size = num_fl_users
nonfl_lr = np.random.sample()
fedbuff_local_lr = np.random.sample()
# We need to normalize global_lr to account for the local training
# hence we need to divide by the (buffer_size * fedbuff_local_lr)
fedbuff_global_lr = nonfl_lr / (buffer_size * fedbuff_local_lr)
error_msg = FedBuffTestUtils.compare_nonfl_fedbuff_uneven_data_split(
total_examples=total_examples,
num_fl_users=num_fl_users,
buffer_size=buffer_size,
non_fl_lr=nonfl_lr,
fedbuff_global_lr=fedbuff_global_lr,
local_lr=fedbuff_local_lr,
examples_per_user_fedbuff=examples_per_user_fedbuff,
examples_per_user_nonfl=total_examples,
batch_size_fedbuff=examples_per_user_fedbuff,
batch_size_nonfl=total_examples,
epochs=1,
)
print(
f"NonFL LR {nonfl_lr} FedBuff Local LR {fedbuff_local_lr} FedBuff Global LR {fedbuff_global_lr}"
)
assertEmpty(error_msg, msg=error_msg)
def test_remaining_clients_to_sync(self) -> None:
"""
An aggregator can still have unaggregated clients when an epoch ends.
This can happen in two scenarios:
1. clients_to_sync > total_users
2. total_users is not divisible by clients_to_sync (num_fl_users % clients_to_sync != 0)
"""
# training in parallel training_duration >> training_rate
training_rate = 1
training_duration_mean = training_rate * 100
total_examples = 10
num_fl_users = 10
batch_size_fedbuff = 1
batch_size_async = total_examples
# test for scenario 1
buffer_size = num_fl_users * 10
local_lr = np.random.sample()
async_global_lr = np.random.sample()
fedbuff_global_lr = async_global_lr / num_fl_users
error_msg = FedBuffTestUtils.compare_async_fedbuff_uneven_data_split(
total_examples=total_examples,
num_fl_users=num_fl_users,
fedbuff_num_fl_users=buffer_size,
async_global_lr=async_global_lr,
fedbuff_global_lr=fedbuff_global_lr,
batch_size_fedbuff=batch_size_fedbuff,
batch_size_async=batch_size_async,
epochs=1,
training_rate=training_rate,
training_duration_mean=training_duration_mean,
local_lr=local_lr,
)
assertEmpty(error_msg, msg=error_msg)
# test for scenario 2
buffer_size = 4
fedbuff_global_lr = async_global_lr / num_fl_users
error_msg = FedBuffTestUtils.compare_async_fedbuff_uneven_data_split(
total_examples=total_examples,
num_fl_users=num_fl_users,
fedbuff_num_fl_users=buffer_size,
async_global_lr=async_global_lr,
fedbuff_global_lr=fedbuff_global_lr,
batch_size_fedbuff=batch_size_fedbuff,
batch_size_async=batch_size_async,
epochs=1,
training_rate=training_rate,
training_duration_mean=training_duration_mean,
local_lr=local_lr,
)
assertEmpty(error_msg, msg=error_msg)
| canife-main | FLSim/flsim/trainers/tests/test_fedbuff.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import NamedTuple
EPS = 1e-10
class Timeline(NamedTuple):
"""
A point in time of the simulation.
This contains epoch, round, and rounds_per_epoch or
global_round and rounds_per_epoch. All of these values
start from 1 by FLSim convention. If any of these
need not be set, the default value of 0 is assigned, which
helps us handle collision cases / initialization errors.
Timeline usually refers to how many epochs/rounds have *finished*
"""
epoch: int = 0
round: int = 0
global_round: int = 0
rounds_per_epoch: int = 1
def global_round_num(self) -> int:
r"""
Returns the global round number starting from 1.
For example, if round is 2 and epoch is 10 and num_rounds_per_epoch is 10,
it will return 92 ((10 - 1) * 10 + 2).
"""
assert (self.epoch and self.round) or self.global_round
return (
self.global_round or (self.epoch - 1) * self.rounds_per_epoch + self.round
)
def as_float(self, offset: int = 0) -> float:
r"""
prints the time-line as a floating number.
E.g. if round is 2 and epoch is 10 and num_rounds_per_epoch is 10,
it will return 9.2. By default uses the round value of the object
but can also show a value for a run that is offset away from the
current round.
"""
return (self.global_round_num() + offset) / self.rounds_per_epoch
def tick(self, tick_interval: float) -> bool:
r"""
Returns true 'tick_interval' times every epoch
E.g: if tick_interval is 10, it returns true 10 times every epoch
This function is useful for deciding when to report train/eval results
"""
return (self.as_float() + EPS) // (1.0 / tick_interval) > (
self.as_float(-1) + EPS
) // (1.0 / tick_interval)
def __str__(self):
assert (self.epoch > 0 and self.round > 0) or self.global_round > 0
e = self.epoch or ((self.global_round - 1) // self.rounds_per_epoch + 1)
r = self.round or (((self.global_round - 1) % self.rounds_per_epoch) + 1)
gr = self.global_round or ((e - 1) * self.rounds_per_epoch + r)
return f"(epoch = {e}, round = {r}, global round = {gr})"
| canife-main | FLSim/flsim/common/timeline.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from queue import PriorityQueue
from typing import Optional
from flsim.channels.base_channel import IdentityChannel
from flsim.clients.async_client import AsyncClientDevice, AsyncClientFactory
from flsim.clients.base_client import ClientConfig
from flsim.common.logger import Logger
from flsim.common.timeout_simulator import TimeOutSimulator
from flsim.common.training_event_handler import AsyncTrainingEventHandler
from flsim.utils.async_trainer.async_user_selector import AsyncUserSelector
from flsim.utils.async_trainer.training_event_generator import IEventGenerator
from flsim.utils.cuda import DEFAULT_CUDA_MANAGER, ICudaStateManager
from tqdm import tqdm
class JobQueueStats:
"""Keeps track of #pending jobs in a job queue.
Computes event based average of jobs
"""
def __init__(self):
self.num_finished_jobs: float = 0
self.pending_jobs_sum: float = 0
self.num_started_jobs: float = 0
def on_job_start(self) -> None:
self.num_started_jobs += 1
def on_job_end(self) -> None:
self.num_finished_jobs += 1
self.pending_jobs_sum += self.num_started_jobs
self.num_started_jobs -= 1
def avg_pending_jobs(self) -> float:
if not self.num_finished_jobs:
return float("Inf")
return self.pending_jobs_sum / self.num_finished_jobs
def as_str(self) -> str:
return f"Mean:{self.avg_pending_jobs():.3f}"
class AsyncTrainingSimulator:
"""This class simulates asynchronous training of devices where training
start times are assumed to be drawn from some probability Generator
(e.g. IEventGenerator). It has a priority queue to keep track
of event starts and event ends. The queue will have exactly one start
event at any point (start events are typically poisson distributed).
Exactly how many end events are there depends on the Generator of
training time compared to the rate of the poisson process for event
starts. It is initialized with:
1) event_generator: IEventGenerator,
2) model_trainer: AsyncTrainingEventHandler, and
3) num_train_end_events_per_epoch: int.
Async trainer's top-level run() function will be called for each epoch.
Then, the simulator will first fetch a DeviceState from its min_heap and
updates the simulator’s timestamp to the event’s relevant_time. If the
fetched DeviceState is waiting for training, we start training with
AsyncJobScheduler’s on_training_start_function, put the DeviceState back
to the min_heap, and log to the JobQueueStats.
Otherwise, it simply ends the training. Note that this works, because
relevant_time is set to training end time after training is triggered.
"""
logger: logging.Logger = Logger.get_logger(__name__)
def __init__(
self,
event_generator: IEventGenerator,
job_scheduler: AsyncTrainingEventHandler,
user_selector: AsyncUserSelector,
shared_client_config: ClientConfig,
num_train_end_events_per_epoch: int,
cuda_manager: ICudaStateManager = DEFAULT_CUDA_MANAGER,
timeout_simulator: Optional[TimeOutSimulator] = None,
channel: Optional[IdentityChannel] = None,
):
self.job_scheduler: AsyncTrainingEventHandler = job_scheduler
self.user_selector: AsyncUserSelector = user_selector
self.event_generator: IEventGenerator = event_generator
self.shared_client_config = shared_client_config
self.num_train_end_events_per_epoch: int = num_train_end_events_per_epoch
self.num_train_end_events: int = 0
self.current_time: float = 0
self.min_heap: PriorityQueue[AsyncClientDevice] = PriorityQueue()
self.queue_stats: JobQueueStats = JobQueueStats()
self.timeout_simulator = timeout_simulator
self.channel = channel
self.cuda_manager = cuda_manager
# init the first event
self.create_future_training_start_event()
def create_future_training_start_event(self) -> None:
# create training client and insert into heap
new_client = AsyncClientFactory.create(
current_time=self.current_time,
event_generator=self.event_generator,
user_selector=self.user_selector,
client_config=self.shared_client_config,
timeout_simulator=self.timeout_simulator,
channel=self.channel,
cuda_manager=self.cuda_manager,
)
# put will query __lt__ on top
self.min_heap.put(new_client)
def start_training(self, top: AsyncClientDevice) -> None:
self.job_scheduler.on_training_start(top)
# put will query __lt__ on top
self.min_heap.put(top)
self.queue_stats.on_job_start()
def end_training(self, top: AsyncClientDevice) -> None:
self.queue_stats.on_job_end()
self.job_scheduler.on_training_end(top)
def run_one_epoch(self) -> None:
with tqdm(
total=self.num_train_end_events_per_epoch,
desc="Client Training Per Epoch",
unit="client",
position=0,
) as pbar:
while not self.min_heap.empty():
# notify async trainer that num_train_end_events_per_epoch have ended training
if self.num_train_end_events == self.num_train_end_events_per_epoch:
self.num_train_end_events = 0
break
top = self.min_heap.get()
# forward time
self.current_time = top.next_event_time()
if top.is_waiting_to_start():
self.start_training(top)
self.create_future_training_start_event()
else:
self.num_train_end_events += 1
self.end_training(top)
pbar.update(1)
def avg_pending_jobs(self) -> float:
return self.queue_stats.avg_pending_jobs()
def print_stats(self, prefix: str) -> None:
print(f"{prefix}PendingJobs, {self.queue_stats.as_str()}")
print(f"Current Time: {self.current_time:.4f}")
| canife-main | FLSim/flsim/common/training_simulator.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Utils class to fine tune a global model on client's train set then set the personalized model
on the client's eval set
"""
from typing import Any, Iterable, Tuple
from flsim.clients.base_client import ClientConfig
from flsim.common.timeline import Timeline
from flsim.data.data_provider import IFLUserData
from flsim.interfaces.metrics_reporter import IFLMetricsReporter, TrainingStage
from flsim.interfaces.model import IFLModel
from flsim.utils.cuda import CudaTransferMinimizer
from hydra.utils import instantiate
from tqdm import tqdm
class FineTuner:
@classmethod
def fine_tune_and_evaluate(
cls,
data: Iterable[IFLUserData],
global_model: IFLModel,
client_config: ClientConfig,
metrics_reporter: IFLMetricsReporter,
cuda_state_manager: CudaTransferMinimizer,
training_stage: TrainingStage,
timeline: Timeline,
epochs: int,
) -> Tuple[Any, bool]:
for user_data in tqdm(
data,
desc="Fine-tune clients",
unit="client",
):
FineTuner.fine_tune_model(
global_model=global_model,
data=user_data,
client_config=client_config,
metrics_reporter=metrics_reporter,
cuda_state_manager=cuda_state_manager,
epochs=epochs,
)
return metrics_reporter.report_metrics(
model=global_model,
reset=True,
stage=training_stage,
timeline=timeline,
epoch=timeline.global_round_num(), # for legacy
print_to_channels=True,
)
@classmethod
def fine_tune_model(
cls,
global_model: IFLModel,
data: IFLUserData,
client_config: ClientConfig,
metrics_reporter: IFLMetricsReporter,
cuda_state_manager: CudaTransferMinimizer,
epochs: int,
) -> IFLModel:
eval_client = instantiate(
client_config,
_partial_=False,
dataset=data,
cuda_manager=cuda_state_manager,
)
fine_tuned_model, _, _ = eval_client.copy_and_train_model(
model=global_model, epochs=epochs
)
eval_client.eval(model=fine_tuned_model, metrics_reporter=metrics_reporter)
return fine_tuned_model
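# Usage sketch (hypothetical names, for illustration only): given eval users
# from a data provider, a trained global model, and the trainer's client
# config, per-client fine-tuning plus evaluation could be invoked roughly as
#   FineTuner.fine_tune_and_evaluate(
#       data=eval_users, global_model=global_model, client_config=client_config,
#       metrics_reporter=metrics_reporter, cuda_state_manager=cuda_state_manager,
#       training_stage=TrainingStage.EVAL, timeline=timeline, epochs=1,
#   )
# where `eval_users`, `metrics_reporter`, `cuda_state_manager`, and `timeline`
# are objects the caller already holds.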
| canife-main | FLSim/flsim/common/fine_tuner.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore # @manual
from .timeout_simulator import (
GaussianTimeOutSimulatorConfig,
NeverTimeOutSimulatorConfig,
)
ConfigStore.instance().store(
name="base_never_timeout_simulator",
node=NeverTimeOutSimulatorConfig,
group="timeout_simulator",
)
ConfigStore.instance().store(
name="base_gaussian_timeout_simulator",
node=GaussianTimeOutSimulatorConfig,
group="timeout_simulator",
)
| canife-main | FLSim/flsim/common/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import List, Tuple
import numpy as np
from flsim.clients.async_client import AsyncClientDevice
from flsim.utils.async_trainer.device_state import DeviceState, TrainingState
class IAsyncTrainingEventHandler(abc.ABC):
r"""
Interface class that performs asynchronous FL model training.
Handles start training for a particular device state and end training for a particular device state.
The AsyncTrainer should implement this class along with FLTrainerBase
"""
@abc.abstractmethod
def on_training_start(self, client: AsyncClientDevice) -> None:
r"""
Callback to notify that a client is starting training
Marks the client state as training.
Note:
-----
The actual training process doesn't start until on_training_end
There's a choice of when should training ACTUALLY happen: when the
training_start event fires, or when training_end event fires
Answer: when training_end event fires. That's because training often
produces side effects - like metric collection. So we should only
train when we're sure that we want those side-effects
"""
pass
@abc.abstractmethod
def on_training_end(self, client: AsyncClientDevice) -> None:
r"""
Callback to notify the trainer of a training end event.
Trains local model for the client and incorporates it into the global model
"""
pass
@abc.abstractmethod
def train_and_update_global_model(self, client: AsyncClientDevice) -> None:
pass
class AsyncTrainingEventHandler(IAsyncTrainingEventHandler):
def on_training_start(self, client: AsyncClientDevice) -> None:
pass
def on_training_end(self, client: AsyncClientDevice) -> None:
r"""
Trains the client's local model and update seqnum
This function does the following:
1. Changes training state to `TRAINING_FINISHED`
2. Trains client's local model by calling `train_and_update_global_model`
"""
client.training_ended()
self.train_and_update_global_model(client=client)
def train_and_update_global_model(self, client: AsyncClientDevice) -> None:
pass
class TestAsyncTrainingEventHandler(AsyncTrainingEventHandler):
r"""
AsyncTrainingEventHandler that stores all model_states in a list.
Useful for testing training simulation.
"""
def __init__(self):
super().__init__()
self.training_events: List[Tuple[DeviceState, TrainingState]] = []
self.num_unseen_global_model_updates: List[int] = []
self.current_seqnum = 0
def on_training_start(self, client: AsyncClientDevice) -> None:
client.training_started(self.current_seqnum)
self.training_events.append((client, TrainingState.TRAINING))
def on_training_end(self, client: AsyncClientDevice) -> None:
super().on_training_end(client)
self.training_events.append((client, TrainingState.TRAINING_FINISHED))
self.current_seqnum += 1
def train_and_update_global_model(self, client: AsyncClientDevice) -> None:
self.num_unseen_global_model_updates.append(
self.current_seqnum - client.model_seqnum
)
def seqnum_diff_mean(self):
return np.mean(self.num_unseen_global_model_updates)
def seqnum_std(self):
return np.std(self.num_unseen_global_model_updates)
| canife-main | FLSim/flsim/common/training_event_handler.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import sys
LOGGING_LEVEL: int = logging.INFO
# If this flag is true logging will be printed to both stdout and stderr
PRINT_TO_STDOUT = False
class Logger:
parent_name = "FLSimLogger"
_instance = None
logging_level = LOGGING_LEVEL
print_to_stdout = PRINT_TO_STDOUT
parent_logger = logging.getLogger(parent_name)
parent_logger.setLevel(logging_level)
children_loggers = []
@classmethod
def get_logger(cls, name: str) -> logging.Logger:
"""Returns a Logger which is a child of the PARENT_LOGGER. Logger hierarchy is
through the name, with `.` used to denote hierarchy levels.
"""
logger = logging.getLogger(cls.parent_name + "." + name)
if cls.print_to_stdout:
handler = logging.StreamHandler(sys.stdout)
cls.parent_logger.addHandler(handler)
cls.children_loggers.append(logger)
return logger
@classmethod
def set_logging_level(cls, level: int) -> None:
cls.parent_logger.setLevel(level)
for child_logger in cls.children_loggers:
child_logger.setLevel(0)
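# Minimal usage sketch (illustrative only, not part of the original module):
# components fetch a named child of the shared FLSimLogger and log through it.
if __name__ == "__main__":
    example_logger = Logger.get_logger("ExampleComponent")
    Logger.set_logging_level(logging.INFO)
    example_logger.info("message routed through FLSimLogger.ExampleComponent")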
| canife-main | FLSim/flsim/common/logger.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
from enum import Enum
from typing import List, Tuple
class DiversityMetricType(Enum):
gradient_diversity = 1
orthogonality = 2
delta_norm_sq = 3
sum_client_delta_norm_sq = 4
sum_client_delta_mutual_angles = 5
class DiversityMetrics:
"""
This class stores diversity metrics for a set of gradients -- see arXiv:1706.05699
For a set of vectors grad f_i,
sum || grad f_i ||_2^2 sum_of_norms
GD = ______________________ = ___________________
|| sum grad f_i ||_2^2 norm_of_sum
sum || grad f_i ||_2^2 sum_of_norms
ORTH = ___________________________________ = ____________________________
sum{i != j} dot(grad f_i, grad f_j) norm_of_sum - sum_of_norms
delta_norm_sq = norm_of_sum
sum_client_delta_norm_sq = sum_of_norms
sum_client_delta_mutual_angles = norm_of_sum - sum_of_norms
For any set of n vectors, GD >= 1/n. ORTH and GD can be infinite,
so we store the reciprocal of these quantities. This gives us good bounds:
0 <= 1/GD <= n and -1 <= 1/ORTH <= n-1
delta_norm_sq is the magnitude of the aggregate gradient step.
This is a nonnegative quantity.
sum_client_delta_norm_sq is the sum of client gradient updates.
sum_client_delta_mutual_angles depends only on the sum of mutual angles
between each pair of client gradients. It can be positive or negative.
The comparators will output a comparison of GD (or ORTH), not 1/GD.
So a > b means GD(a) > GD(b), i.e. internal comparison of 1/GD(a) < 1/GD(b)
Inputs:
norm_of_sum >= 0
sum_of_norms > 0
"""
def __init__(
self,
norm_of_sum: float,
sum_of_norms: float,
diversity_metric_type: DiversityMetricType = DiversityMetricType.gradient_diversity,
):
if not isinstance(diversity_metric_type, DiversityMetricType):
raise ValueError("diversity_metric_type must be of DiversityMetricType")
self._recpr_gradient_diversity = norm_of_sum / sum_of_norms
self._recpr_orthogonality = (norm_of_sum - sum_of_norms) / sum_of_norms
self._norm_of_sum = norm_of_sum
self._sum_of_norms = sum_of_norms
self._diversity_metric_type = diversity_metric_type
# Provide getters but not setters, since the GD and ORTH are not actually stored.
@property
def gradient_diversity(self) -> float:
if self._recpr_gradient_diversity == 0:
return math.nan
return 1.0 / self._recpr_gradient_diversity
@property
def orthogonality(self) -> float:
if self._recpr_orthogonality == 0:
return math.nan
return 1.0 / self._recpr_orthogonality
@property
def delta_norm_sq(self) -> float:
return self._norm_of_sum
@property
def sum_client_delta_norm_sq(self) -> float:
return self._sum_of_norms
@property
def sum_client_delta_mutual_angles(self) -> float:
return self._norm_of_sum - self._sum_of_norms
@property
def metric_value(self) -> float:
if self._diversity_metric_type == DiversityMetricType.orthogonality:
return self.orthogonality
elif self._diversity_metric_type == DiversityMetricType.delta_norm_sq:
return self.delta_norm_sq
elif (
self._diversity_metric_type == DiversityMetricType.sum_client_delta_norm_sq
):
return self.sum_client_delta_norm_sq
elif (
self._diversity_metric_type
== DiversityMetricType.sum_client_delta_mutual_angles
):
return self.sum_client_delta_mutual_angles
return self.gradient_diversity
@property
def diversity_metric_type(self) -> DiversityMetricType:
return self._diversity_metric_type
@diversity_metric_type.setter
def diversity_metric_type(self, diversity_metric_type: DiversityMetricType):
if not isinstance(diversity_metric_type, DiversityMetricType):
raise ValueError("diversity_metric_type must be of DiversityMetricType")
self._diversity_metric_type = diversity_metric_type
# Metrics must not be 'None' to be equal. If LHS has a value and RHS is
# 'None', LHS is greater. Since quantities considered are reciprocals, the
# inequalities below are reversed from what one would expect.
def __eq__(self, other):
v1, v2 = self._get_metrics_of_interest(other)
return math.isclose(v1, v2, rel_tol=1e-06)
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
v1, v2 = self._get_metrics_of_interest(other)
return v1 < v2
def __lt__(self, other):
v1, v2 = self._get_metrics_of_interest(other)
return v1 > v2
def __ge__(self, other):
v1, v2 = self._get_metrics_of_interest(other)
return v1 <= v2
def __le__(self, other):
v1, v2 = self._get_metrics_of_interest(other)
return v1 >= v2
def __repr__(self):
return "%5.5f" % self.metric_value
def _get_metrics_of_interest(self, other) -> Tuple[float, float]:
# For comparison, the two objects must have the same metric of interest
assert self.diversity_metric_type == other.diversity_metric_type
v1 = self._recpr_gradient_diversity
v2 = other._recpr_gradient_diversity
if self.diversity_metric_type == DiversityMetricType.orthogonality:
v1 = self._recpr_orthogonality
v2 = other._recpr_orthogonality
elif self.diversity_metric_type == DiversityMetricType.delta_norm_sq:
# Use negatives for comparison, since internal comparators are flipped
v1 = -self.delta_norm_sq
v2 = -other.delta_norm_sq
elif self.diversity_metric_type == DiversityMetricType.sum_client_delta_norm_sq:
# Use negatives for comparison, since internal comparators are flipped
v1 = -self.sum_client_delta_norm_sq
v2 = -other.sum_client_delta_norm_sq
elif (
self.diversity_metric_type
== DiversityMetricType.sum_client_delta_mutual_angles
):
# Use negatives for comparison, since internal comparators are flipped
v1 = -self.sum_client_delta_mutual_angles
v2 = -other.sum_client_delta_mutual_angles
return v1, v2
class DiversityStatistics:
def __init__(self, diversity_metrics_list: List[DiversityMetrics]):
if len(diversity_metrics_list) == 0:
raise ValueError("diversity_metrics_list must not be empty")
metrics = [
cohort_metric.metric_value for cohort_metric in diversity_metrics_list
]
self.maximum_metric = max(metrics)
self.average_metric = sum(metrics) / len(metrics)
self.minimum_metric = min(metrics)
def __eq__(self, other):
close_max = math.isclose(
self.maximum_metric, other.maximum_metric, rel_tol=1e-04
)
close_avg = math.isclose(
self.average_metric, other.average_metric, rel_tol=1e-04
)
close_min = math.isclose(
self.minimum_metric, other.minimum_metric, rel_tol=1e-04
)
return close_max and close_avg and close_min
def __repr__(self):
return "%5.5f,%5.5f,%5.5f" % (
self.maximum_metric,
self.average_metric,
self.minimum_metric,
)
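# Illustrative usage sketch (not part of the original module). The numbers are
# made up purely to show GD-based comparison and cohort statistics; they do not
# come from any FLSim experiment.
if __name__ == "__main__":
    a = DiversityMetrics(norm_of_sum=1.0, sum_of_norms=2.0)  # GD = 2.0
    b = DiversityMetrics(norm_of_sum=1.0, sum_of_norms=4.0)  # GD = 4.0
    assert b > a  # comparison is on GD itself, not on the stored reciprocal
    stats = DiversityStatistics([a, b])
    print(stats)  # "4.00000,3.00000,2.00000" -> max, average, min GD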
| canife-main | FLSim/flsim/common/diversity_metrics.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Contains helpers for Python PyTest tests.
There is a list of helper utilities missing in PyTest.
We define some of the helpful utilities in this one file
so that all FL-sim PyTests can reuse them easily.
"""
from typing import Any, List, Sized
def assertIsInstance(o: object, t: type) -> None:
assert isinstance(o, t)
def assertAlmostEqual(first, second, places=None, msg=None, delta=None) -> None:
"""Assert that ``first`` and ``second`` are almost equal to each other.
The equality of ``first`` and ``second`` is determined in a similar way to
the ``assertAlmostEqual`` function of the standard library.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places - not both")
# pyre-fixme[35]: Target cannot be annotated.
msg: str = ""
diff = abs(second - first)
if delta is not None:
if diff <= delta:
return
msg = (
f"The difference between {first} and {second} is not within {delta} delta."
)
else:
if places is None:
places = 7
if round(diff, places) == 0:
return
msg = f"The difference between {first} and {second} is not within {places} decimal places."
raise AssertionError(msg)
def assertEqual(object1: object, object2: object, e: object = None) -> None:
assert object1 == object2, e
def assertNotEqual(object1: object, object2: object, e: object = None) -> None:
assert object1 != object2, e
def assertLess(object1, object2) -> None:
assert object1 < object2
def assertGreater(object1, object2) -> None:
assert object1 > object2
def assertLessEqual(object1, object2) -> None:
assert object1 <= object2
def assertGreaterEqual(object1, object2) -> None:
assert object1 >= object2
def assertIsNotNone(o: object) -> None:
assert o is not None
def assertTrue(o: object, e: object = None) -> None:
assert o, e
def assertFalse(o: object, e: object = None) -> None:
assert not o, e
def assertEmpty(o: Sized, msg: object = None) -> None:
assert len(o) == 0, msg
def assertNotEmpty(o: Sized, msg: object = None) -> None:
assert len(o) > 0, msg
def assertListEqual(l1: List, l2: List) -> None:
assert l1 == l2
class assertRaises(object):
def __init__(self, expected_exc: type) -> None:
self.expected_exc = expected_exc
def __enter__(self) -> "assertRaises":
return self
def __exit__(self, exc_type: type, exc_value: Exception, traceback: Any) -> bool:
if exc_type is None:
raise AssertionError(f"{self.expected_exc} was not raised")
return isinstance(exc_value, self.expected_exc)
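# Illustrative usage sketch (not part of the original module), showing the
# assertion helpers and the assertRaises context manager in a plain script.
if __name__ == "__main__":
    assertAlmostEqual(1.0, 1.0 + 1e-8)  # within the default 7 decimal places
    assertAlmostEqual(10.0, 10.4, delta=0.5)  # within an explicit delta
    with assertRaises(ValueError):
        int("not a number")  # raises ValueError, so the context exits cleanly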
| canife-main | FLSim/flsim/common/pytest_helper.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
from dataclasses import dataclass
from typing import List
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.timing.training_duration_distribution import (
PerExampleGaussianDurationDistributionConfig,
)
from hydra.utils import instantiate
from omegaconf import MISSING
class TimeOutSimulator(abc.ABC):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=TimeOutSimulatorConfig,
**kwargs,
)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@abc.abstractmethod
def simulate_per_example_training_time(self) -> float:
"""
return the simulated time required to process a single example
"""
pass
@abc.abstractmethod
def simulate_training_time(self, device_perf: float, num_samples: int) -> float:
"""
return training time on one client
"""
pass
@abc.abstractmethod
def track_training_time_distribution(self, one_user_training_time: float) -> None:
"""
update the empirical training time statistics
"""
pass
@abc.abstractmethod
def track_fl_elapsed_time(self, training_time_in_round: List[float]) -> None:
"""
track the total time taken by FL so far, given the list of user training times
in the round
"""
pass
@abc.abstractmethod
def user_timeout(self, training_time: float) -> bool:
"""
time out for one user
"""
pass
@abc.abstractmethod
def stop_fl(self) -> bool:
"""
stopping condition for entire FL training
"""
pass
@property
@abc.abstractmethod
def sample_mean_per_user(self) -> float:
"""
empirical mean of training time per user
"""
pass
@property
@abc.abstractmethod
def sample_var_per_user(self) -> float:
"""
empirical variance of training time per user
"""
pass
class NeverTimeOutSimulator(TimeOutSimulator):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=NeverTimeOutSimulatorConfig,
**kwargs,
)
super().__init__(**kwargs)
self._sample_mean_per_user = 0.0
self._sample_var_per_user = 0.0
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def simulate_per_example_training_time(self) -> float:
"""
training finishes instantaneously
"""
return 0.0
def simulate_training_time(self, device_perf: float, num_samples: int) -> float:
"""
training finishes instantaneously
"""
return 0.0
def track_training_time_distribution(self, one_user_training_time: float) -> None:
pass
def user_timeout(self, training_time: float) -> bool:
return False
@property
def sample_mean_per_user(self) -> float:
return 0.0
@property
def sample_var_per_user(self) -> float:
return 0.0
def track_fl_elapsed_time(self, training_time_in_round: List[float]) -> None:
pass
def stop_fl(self) -> bool:
return False
class GaussianTimeOutSimulator(TimeOutSimulator):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=GaussianTimeOutSimulatorConfig,
**kwargs,
)
super().__init__(**kwargs)
self.duration_distribution_generator = instantiate(
self.cfg.duration_distribution_generator
)
self._num_users_tracked = 0
self._num_users_succeed = 0
self._sample_mean_per_user = 0.0
self._sample_sec_moment_per_user = 0.0
self._sample_var_per_user = 0.0
self._fl_stopping_time = self.cfg.fl_stopping_time
self._fl_total_elapse_time = 0.0
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@property
def fl_stopping_time(self):
return self._fl_stopping_time
@property
def sample_mean_per_user(self):
return self._sample_mean_per_user
@property
def sample_var_per_user(self):
return self._sample_var_per_user
def simulate_per_example_training_time(self) -> float:
"""
return a Gaussian R.V. representing the number of time units required per example
"""
return self.duration_distribution_generator.bounded_gaussian_sample()
def simulate_training_time(self, device_perf: float, num_samples: int) -> float:
"""
return training time on one client
"""
# pyre-fixme[16]: `GaussianTimeOutSimulator` has no attribute `cfg`.
return min([device_perf * num_samples, self.cfg.timeout_wall_per_round])
def track_training_time_distribution(self, one_user_training_time: float) -> None:
self._num_users_tracked += 1
# update sample mean
self._sample_mean_per_user = (
self._sample_mean_per_user * (self._num_users_tracked - 1)
+ one_user_training_time
) / self._num_users_tracked
# update sample second moment
self._sample_sec_moment_per_user = (
self._sample_sec_moment_per_user * (self._num_users_tracked - 1)
+ one_user_training_time**2
) / self._num_users_tracked
# update sample variance, with degree of freedom correction
if self._num_users_tracked > 1:
self._sample_var_per_user = (
self._num_users_tracked / (self._num_users_tracked - 1)
) * (self._sample_sec_moment_per_user - self._sample_mean_per_user**2)
def user_timeout(self, training_time: float) -> bool:
"""
training time out for one user
"""
# pyre-fixme[16]: `GaussianTimeOutSimulator` has no attribute `cfg`.
return training_time >= self.cfg.timeout_wall_per_round
def stop_fl(self) -> bool:
"""
stop entire FL training when the total elapsed time exceeds the stopping time
"""
return self._fl_total_elapse_time >= self._fl_stopping_time
def track_fl_elapsed_time(self, training_time_in_round: List[float]) -> None:
self._fl_total_elapse_time += min(
# pyre-fixme[16]: `GaussianTimeOutSimulator` has no attribute `cfg`.
[self.cfg.timeout_wall_per_round, max(training_time_in_round)]
)
@dataclass
class TimeOutSimulatorConfig:
_target_: str = MISSING
_recursive_: bool = False
@dataclass
class NeverTimeOutSimulatorConfig(TimeOutSimulatorConfig):
_target_: str = fullclassname(NeverTimeOutSimulator)
@dataclass
class GaussianTimeOutSimulatorConfig(TimeOutSimulatorConfig):
_target_: str = fullclassname(GaussianTimeOutSimulator)
timeout_wall_per_round: float = 1.0
fl_stopping_time: float = 1.0
duration_distribution_generator: PerExampleGaussianDurationDistributionConfig = (
PerExampleGaussianDurationDistributionConfig()
)
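# Illustrative usage sketch (not part of the original module): builds a
# simulator from its structured config, mirroring how the unit tests construct
# it. The parameter values are arbitrary and chosen only for demonstration.
if __name__ == "__main__":
    from omegaconf import OmegaConf

    simulator = GaussianTimeOutSimulator(
        **OmegaConf.structured(
            GaussianTimeOutSimulatorConfig(
                timeout_wall_per_round=3.0,
                fl_stopping_time=100.0,
                duration_distribution_generator=PerExampleGaussianDurationDistributionConfig(
                    training_duration_mean=1.0, training_duration_sd=0.5
                ),
            )
        )
    )
    device_perf = simulator.simulate_per_example_training_time()
    train_time = simulator.simulate_training_time(device_perf, num_samples=10)
    print(train_time, simulator.user_timeout(train_time))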
| canife-main | FLSim/flsim/common/timeout_simulator.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import math
import random
from flsim.common.diversity_metrics import (
DiversityMetrics,
DiversityMetricType,
DiversityStatistics,
)
from flsim.common.pytest_helper import assertEqual, assertTrue
class TestDiversityStatistics:
def test_initialize_diversity_statistics(self) -> None:
n_test_cases = 1000
num_cohorts_per_test = 10
std_dev = 500.0
num_tol = 1e-06
for _ in range(n_test_cases):
cohort_diversity_metrics = []
cohort_gradient_diversities = []
cohort_orthogonalities = []
for _ in range(num_cohorts_per_test):
norm_of_sum = abs(random.normalvariate(0, std_dev))
sum_of_norms = abs(random.normalvariate(0, std_dev))
diversity_metrics = DiversityMetrics(norm_of_sum, sum_of_norms)
cohort_diversity_metrics.append(diversity_metrics)
cohort_gradient_diversities.append(diversity_metrics.gradient_diversity)
cohort_orthogonalities.append(diversity_metrics.orthogonality)
diversity_statistics = DiversityStatistics(cohort_diversity_metrics)
diversity_statistics_eq = DiversityStatistics(cohort_diversity_metrics)
assertEqual(diversity_statistics, diversity_statistics_eq)
assertTrue(
math.isclose(
diversity_statistics.maximum_metric,
max(cohort_gradient_diversities),
rel_tol=num_tol,
)
)
assertTrue(
math.isclose(
diversity_statistics.minimum_metric,
min(cohort_gradient_diversities),
rel_tol=num_tol,
)
)
assertTrue(
math.isclose(
diversity_statistics.average_metric,
sum(cohort_gradient_diversities) / len(cohort_gradient_diversities),
rel_tol=num_tol,
)
)
for metric in cohort_diversity_metrics:
metric.diversity_metric_type = DiversityMetricType.orthogonality
diversity_statistics = DiversityStatistics(cohort_diversity_metrics)
assertTrue(
math.isclose(
diversity_statistics.maximum_metric,
max(cohort_orthogonalities),
rel_tol=num_tol,
)
)
assertTrue(
math.isclose(
diversity_statistics.minimum_metric,
min(cohort_orthogonalities),
rel_tol=num_tol,
)
)
assertTrue(
math.isclose(
diversity_statistics.average_metric,
sum(cohort_orthogonalities) / len(cohort_orthogonalities),
rel_tol=num_tol,
)
)
class TestDiversityMetrics:
def test_initialize_diversity_metrics(self) -> None:
diversity_metrics = DiversityMetrics(norm_of_sum=1.0, sum_of_norms=2.0)
assertTrue(isinstance(diversity_metrics, DiversityMetrics))
assertEqual(
diversity_metrics.diversity_metric_type,
DiversityMetricType.gradient_diversity,
)
# Test all comparators for both metrics of interest
diversity_metrics_incr = DiversityMetrics(norm_of_sum=1.0, sum_of_norms=3.0)
assertTrue(diversity_metrics_incr > diversity_metrics)
assertTrue(diversity_metrics_incr >= diversity_metrics)
assertTrue(diversity_metrics < diversity_metrics_incr)
assertTrue(diversity_metrics <= diversity_metrics_incr)
assertTrue(diversity_metrics != diversity_metrics_incr)
assertTrue(not (diversity_metrics == diversity_metrics_incr))
diversity_metrics.diversity_metric_type = DiversityMetricType.orthogonality
diversity_metrics_incr.diversity_metric_type = DiversityMetricType.orthogonality
assertTrue(diversity_metrics_incr > diversity_metrics)
assertTrue(diversity_metrics_incr >= diversity_metrics)
assertTrue(diversity_metrics < diversity_metrics_incr)
assertTrue(diversity_metrics <= diversity_metrics_incr)
assertTrue(diversity_metrics != diversity_metrics_incr)
assertTrue(not (diversity_metrics == diversity_metrics_incr))
n_test_cases = 1000
std_dev = 500.0
numerical_tol = 1e-04
for _ in range(n_test_cases):
norm_of_sum = abs(random.normalvariate(0, std_dev))
sum_of_norms = abs(random.normalvariate(0, std_dev))
diversity_metrics = DiversityMetrics(norm_of_sum, sum_of_norms)
# Check bounds on and relationship between the two stored quantities
assertTrue(diversity_metrics._recpr_gradient_diversity >= 0.0)
assertTrue(diversity_metrics._recpr_orthogonality >= -1.0)
assertTrue(
diversity_metrics.orthogonality < 0
or diversity_metrics.gradient_diversity
<= diversity_metrics.orthogonality
)
assertTrue(
math.isclose(
diversity_metrics._recpr_gradient_diversity
- diversity_metrics._recpr_orthogonality,
1.0,
rel_tol=numerical_tol,
)
)
diversity_metrics_cpy = copy.deepcopy(diversity_metrics)
diversity_metrics_eq = DiversityMetrics(norm_of_sum, sum_of_norms)
assertEqual(diversity_metrics_cpy, diversity_metrics)
assertEqual(diversity_metrics_eq, diversity_metrics)
# Increasing sum_of_norms will increase both gradient diversity and orthogonality.
# Increasing norm_of_sum will decrease them both.
diversity_metrics_incr = DiversityMetrics(norm_of_sum, sum_of_norms * 1.01)
diversity_metrics_decr = DiversityMetrics(norm_of_sum * 1.01, sum_of_norms)
assertTrue(diversity_metrics_incr > diversity_metrics)
assertTrue(diversity_metrics_decr < diversity_metrics)
diversity_metrics.diversity_metric_type = DiversityMetricType.orthogonality
diversity_metrics_incr.diversity_metric_type = (
DiversityMetricType.orthogonality
)
diversity_metrics_decr.diversity_metric_type = (
DiversityMetricType.orthogonality
)
assertTrue(diversity_metrics_incr > diversity_metrics)
assertTrue(diversity_metrics_decr < diversity_metrics)
diversity_metrics.diversity_metric_type = (
DiversityMetricType.gradient_diversity
)
# Check that metric_value returns the value of the property of interest
diversity_metrics.diversity_metric_type = DiversityMetricType.orthogonality
assertTrue(
math.isclose(
diversity_metrics.metric_value, diversity_metrics.orthogonality
)
)
diversity_metrics.diversity_metric_type = (
DiversityMetricType.gradient_diversity
)
assertTrue(
math.isclose(
diversity_metrics.metric_value, diversity_metrics.gradient_diversity
)
)
# Check the special case when norm_of_sum = sum_of_norms
diversity_metrics_eq = DiversityMetrics(sum_of_norms, sum_of_norms)
assertTrue(
math.isclose(
diversity_metrics_eq._recpr_gradient_diversity,
1.0,
rel_tol=numerical_tol,
)
)
assertTrue(
math.isclose(
diversity_metrics_eq._recpr_orthogonality,
0.0,
rel_tol=numerical_tol,
)
)
assertTrue(
math.isclose(
diversity_metrics_eq.gradient_diversity, 1.0, rel_tol=numerical_tol
)
)
assertTrue(math.isnan(diversity_metrics_eq.orthogonality))
| canife-main | FLSim/flsim/common/tests/test_diversity_metrics.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from flsim.common.pytest_helper import (
assertAlmostEqual,
assertEqual,
assertFalse,
assertTrue,
)
from flsim.common.timeline import Timeline
class TestTimeline:
def test_global_round_num(self) -> None:
# test first round
tl = Timeline(epoch=1, round=1)
assertEqual(tl.global_round_num(), 1)
# test first round does not depend on rounds_per_epoch
tl = Timeline(epoch=1, round=1, rounds_per_epoch=100)
assertEqual(tl.global_round_num(), 1)
# test global_round_num properly calculated if global_round not given
tl = Timeline(epoch=2, round=3, rounds_per_epoch=4)
assertEqual(tl.global_round_num(), 7)
# test global_round_num equals global_round regardless of epoch/round
tl = Timeline(epoch=1, round=1, rounds_per_epoch=1, global_round=123)
assertEqual(tl.global_round_num(), tl.global_round)
def test_as_float(self) -> None:
# test first round
tl = Timeline(epoch=1, round=1)
assertAlmostEqual(tl.as_float(), 1.0) # float starts from 1
# test first round
tl = Timeline(epoch=1, round=1, rounds_per_epoch=100)
assertAlmostEqual(tl.as_float(), 1 / 100)
# first round of 10th epoch
tl = Timeline(epoch=10, round=1)
assertAlmostEqual(tl.as_float(), 10.0)
# test first round does not depend on rounds_per_epoch
tl = Timeline(epoch=10, round=1, rounds_per_epoch=100)
assertAlmostEqual(tl.as_float(), 9 + (1 / 100))
# test offset
tl = Timeline(epoch=2, round=1, rounds_per_epoch=100)
assertAlmostEqual(tl.as_float(-1), 1.0)
# test from global_round
tl = Timeline(global_round=12, rounds_per_epoch=10)
assertAlmostEqual(tl.as_float(), 1.2)
assertAlmostEqual(tl.as_float(1), 1.3)
# test global_round dominates
tl = Timeline(global_round=12, rounds_per_epoch=10, epoch=3, round=4)
assertAlmostEqual(tl.as_float(), 1.2)
# test random
tl = Timeline(epoch=5, round=2, rounds_per_epoch=3)
assertAlmostEqual(tl.as_float(), 4.66, delta=0.01)
def test_string(self) -> None:
tl = Timeline(epoch=2, round=2, rounds_per_epoch=10)
assertEqual(f"{tl}", "(epoch = 2, round = 2, global round = 12)")
tl = Timeline(global_round=12, rounds_per_epoch=10)
assertEqual(f"{tl}", "(epoch = 2, round = 2, global round = 12)")
tl = Timeline(global_round=10, rounds_per_epoch=10)
assertEqual(f"{tl}", "(epoch = 1, round = 10, global round = 10)")
def test_tick_simple(self) -> None:
# test every epoch, i.e. tick_interval = 1
tl = Timeline(epoch=1, round=1, rounds_per_epoch=10)
assertFalse(tl.tick(tick_interval=1))
tl = Timeline(epoch=1, round=10, rounds_per_epoch=10)
assertTrue(tl.tick(tick_interval=1))
tl = Timeline(epoch=2, round=1, rounds_per_epoch=10)
assertFalse(tl.tick(tick_interval=1))
tl = Timeline(epoch=2, round=10, rounds_per_epoch=10)
assertTrue(tl.tick(tick_interval=1))
# test every 2 epoch
tl = Timeline(epoch=1, round=1, rounds_per_epoch=10)
assertFalse(tl.tick(tick_interval=2))
tl = Timeline(epoch=1, round=10, rounds_per_epoch=10)
assertFalse(tl.tick(tick_interval=2))
tl = Timeline(epoch=2, round=1, rounds_per_epoch=10)
assertFalse(tl.tick(tick_interval=2))
tl = Timeline(epoch=2, round=10, rounds_per_epoch=10)
assertTrue(tl.tick(tick_interval=2))
# test every 0.2 epoch
tl = Timeline(epoch=1, round=1, rounds_per_epoch=10)
assertFalse(tl.tick(tick_interval=0.2))
tl = Timeline(epoch=1, round=2, rounds_per_epoch=10)
assertTrue(tl.tick(tick_interval=0.2))
tl = Timeline(epoch=2, round=1, rounds_per_epoch=10)
assertFalse(tl.tick(tick_interval=0.2))
tl = Timeline(epoch=1, round=2, rounds_per_epoch=10)
assertTrue(tl.tick(tick_interval=0.2))
tl = Timeline(epoch=1, round=3, rounds_per_epoch=10)
assertFalse(tl.tick(tick_interval=0.2))
tl = Timeline(epoch=1, round=4, rounds_per_epoch=10)
assertTrue(tl.tick(tick_interval=0.2))
# test with global round
tl = Timeline(global_round=4, rounds_per_epoch=2)
assertFalse(tl.tick(tick_interval=3))
assertTrue(tl.tick(tick_interval=2))
def test_tick_complex(self) -> None:
# doing 100 epochs, 10 rounds per epoch, ticking interval = 10 epochs
sum = 0
for e in range(1, 101):
for r in range(1, 11):
tl = Timeline(epoch=e, round=r, rounds_per_epoch=10)
sum += tl.tick(10)
assertEqual(sum, 10)
# doing 100 epochs, 10 rounds per epoch, ticking interval = 0.9 epochs
sum = 0
for e in range(1, 101):
for r in range(1, 11):
tl = Timeline(epoch=e, round=r, rounds_per_epoch=10)
sum += tl.tick(0.9)
assertEqual(sum, 111)
| canife-main | FLSim/flsim/common/tests/test_round_timeline.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/common/tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from flsim.common.pytest_helper import (
assertAlmostEqual,
assertEqual,
assertIsInstance,
assertTrue,
)
from flsim.common.timeout_simulator import (
GaussianTimeOutSimulator,
GaussianTimeOutSimulatorConfig,
)
from flsim.utils.timing.training_duration_distribution import (
PerExampleGaussianDurationDistributionConfig,
)
from omegaconf import OmegaConf
class TestTrainingTimeOutSimulator:
def test_online_stat_computation_correct(self) -> None:
timeout_simulator = GaussianTimeOutSimulator(
**OmegaConf.structured(
GaussianTimeOutSimulatorConfig(
timeout_wall_per_round=3.0,
fl_stopping_time=99999.0,
duration_distribution_generator=PerExampleGaussianDurationDistributionConfig(
training_duration_mean=1.0, training_duration_sd=1.0
),
),
)
)
assertTrue(isinstance(timeout_simulator, GaussianTimeOutSimulator))
num_users = 1000
max_sample_per_user = 20
num_samples_per_user = [
(user % max_sample_per_user) + 1 for user in range(num_users)
]
training_time_all_users = []
for i in range(num_users):
sim_device_perf = timeout_simulator.simulate_per_example_training_time()
sim_train_time = timeout_simulator.simulate_training_time(
sim_device_perf, num_samples_per_user[i]
)
timeout_simulator.track_training_time_distribution(sim_train_time)
training_time_all_users.append(sim_train_time)
# using np.allclose to compare floats
assertTrue(
np.allclose(
[timeout_simulator.sample_mean_per_user],
[np.mean(training_time_all_users)],
)
)
assertTrue(
np.allclose(
[timeout_simulator.sample_var_per_user],
[np.var(training_time_all_users, ddof=1)],
)
)
def test_fl_stops_small_stopping_time(self) -> None:
"""
Create a dummy "training loop" (loop through users and rounds
without actual training) and stop it in two ways: by explicitly
comparing elapsed time against fl_stopping_time, and by using the
variables tracked internally by timeout_simulator via its
stop_fl() method; both should stop at the same round.
"""
timeout_simulator = GaussianTimeOutSimulator(
**OmegaConf.structured(
GaussianTimeOutSimulatorConfig(
timeout_wall_per_round=7.0,
fl_stopping_time=10.0,
duration_distribution_generator=PerExampleGaussianDurationDistributionConfig(
training_duration_mean=5.0, training_duration_sd=1.0
),
),
)
)
assertIsInstance(timeout_simulator, GaussianTimeOutSimulator)
num_users = 1000
max_sample_per_user = 20
num_rounds = 10
num_users_per_round = int(num_users / num_rounds)
num_samples_per_user = [
(user % max_sample_per_user) + 1 for user in range(num_users)
]
elapsed_time_each_round = []
torch.manual_seed(1)
# ground-truth: manually calling private functions in timeout_simulator and
# record all elapsed time into list for book-keeping
for r in range(num_rounds):
training_time_users_in_round = []
for i in range(num_users_per_round):
sim_device_perf = timeout_simulator.simulate_per_example_training_time()
sim_train_time = timeout_simulator.simulate_training_time(
sim_device_perf, num_samples_per_user[r * num_users_per_round + i]
)
training_time_users_in_round.append(sim_train_time)
elapsed_time_each_round.append(max(training_time_users_in_round))
if sum(elapsed_time_each_round) >= timeout_simulator._fl_stopping_time:
fl_stopping_round_ground_truth = r
break
torch.manual_seed(1)
# using timeout_simulator.track_fl_elapsed_time and timeout_simulator.stop_fl()
# to determine stopping condition
for r in range(num_rounds):
training_time_users_in_round = []
for i in range(num_users_per_round):
sim_device_perf = timeout_simulator.simulate_per_example_training_time()
sim_train_time = timeout_simulator.simulate_training_time(
sim_device_perf, num_samples_per_user[r * num_users_per_round + i]
)
training_time_users_in_round.append(sim_train_time)
timeout_simulator.track_fl_elapsed_time(training_time_users_in_round)
if timeout_simulator.stop_fl():
fl_stopping_round_simulator = r
break
assertAlmostEqual(
sum(elapsed_time_each_round),
timeout_simulator._fl_total_elapse_time,
delta=1e-6,
)
# pyre-fixme[61]: `fl_stopping_round_ground_truth` is undefined, or not
# always defined.
# pyre-fixme[61]: `fl_stopping_round_simulator` is undefined, or not always
# defined.
assertEqual(fl_stopping_round_ground_truth, fl_stopping_round_simulator)
def test_fl_stops_small_stopping_time_2(self) -> None:
r"""
training time should not be negative
"""
timeout_simulator = GaussianTimeOutSimulator(
**OmegaConf.structured(
GaussianTimeOutSimulatorConfig(
timeout_wall_per_round=9999,
fl_stopping_time=99999,
duration_distribution_generator=PerExampleGaussianDurationDistributionConfig(
training_duration_mean=1.0,
training_duration_sd=10.0,
training_duration_min=0.00001,
),
),
)
)
for _ in range(1000):
assert timeout_simulator.simulate_per_example_training_time() > 0.0
| canife-main | FLSim/flsim/common/tests/test_timeout_simulator.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from flsim.clients.base_client import ClientConfig
from flsim.common.fine_tuner import FineTuner
from flsim.common.pytest_helper import assertEmpty, assertEqual
from flsim.common.timeline import Timeline
from flsim.interfaces.metrics_reporter import TrainingStage
from flsim.optimizers.local_optimizers import LocalOptimizerSGDConfig
from flsim.utils import test_utils as utils
from flsim.utils.cuda import DEFAULT_CUDA_MANAGER
class TestFineTuner:
def _fake_data(self, num_batches, batch_size):
dataset = [torch.ones(batch_size, 2) for _ in range(num_batches)]
dataset = utils.DatasetFromList(dataset)
return utils.DummyUserData(dataset, utils.SampleNet(utils.Linear()))
def _get_model(self, value):
model = utils.SampleNet(utils.Linear())
model.fl_get_module().fill_all(value)
return model
def test_fine_tune_model(self) -> None:
"""
Global model starts at 1, grad = 1, so the model after
1 batch is going to be 0
"""
metrics_reporter = utils.SimpleMetricReporter()
global_model = self._get_model(1)
data = self._fake_data(num_batches=1, batch_size=1)
client_config = ClientConfig(optimizer=LocalOptimizerSGDConfig(lr=1.0))
fine_tuned_model = FineTuner.fine_tune_model(
global_model,
data,
client_config,
metrics_reporter,
# pyre-fixme[6]: Expected `CudaTransferMinimizer` for 5th param but got
# `ICudaStateManager`.
DEFAULT_CUDA_MANAGER,
epochs=1,
)
error_msg = utils.model_parameters_equal_to_value(fine_tuned_model, 0.0)
assertEmpty(error_msg, error_msg)
def test_fine_tune_and_evaluate(self) -> None:
global_model = self._get_model(1)
num_clients = 10
num_batches = 10
batch_size = 1
data = [
self._fake_data(num_batches=num_batches, batch_size=batch_size)
for _ in range(num_clients)
]
client_config = ClientConfig()
metrics_reporter = utils.SimpleMetricReporter()
FineTuner.fine_tune_and_evaluate(
data,
global_model,
client_config,
metrics_reporter,
# pyre-fixme[6]: Expected `CudaTransferMinimizer` for 5th param but got
# `ICudaStateManager`.
DEFAULT_CUDA_MANAGER,
TrainingStage.PERSONALIZED_TEST,
Timeline(epoch=1, round=2, global_round=2, rounds_per_epoch=10),
epochs=1,
)
assertEqual(len(metrics_reporter.batch_metrics), num_batches * num_clients)
assertEqual(
sum([p.num_examples for p in metrics_reporter.batch_metrics]),
num_clients * batch_size * num_batches,
)
| canife-main | FLSim/flsim/common/tests/test_fine_tuner.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import pytest
from flsim.clients.async_client import AsyncClientDevice
from flsim.clients.base_client import Client, ClientConfig
from flsim.common.pytest_helper import assertEqual
from flsim.common.training_event_handler import TestAsyncTrainingEventHandler
from flsim.data.data_provider import FLDataProviderFromList, FLUserDataFromList
from flsim.utils.async_trainer.async_user_selector import AsyncUserSelectorInfo
from flsim.utils.async_trainer.device_state import TrainingSchedule
from flsim.utils.sample_model import MockFLModel
from flsim.utils.tests.helpers.test_data_utils import DummyAlphabetDataset
from omegaconf import OmegaConf
@pytest.fixture(scope="class")
def prepare_job_scheduler_util_test(request) -> None:
request.cls.shared_client_config = ClientConfig(
epochs=1,
max_clip_norm_normalized=0,
only_federated_params=True,
random_seed=1,
store_models_and_optimizers=False,
)
@pytest.mark.usefixtures("prepare_job_scheduler_util_test")
class TestJobSchedulerUtil:
def _build_data_provider(
self, num_examples, examples_per_user: int, user_batch_size: int, global_model
) -> FLDataProviderFromList:
dummy_dataset = DummyAlphabetDataset(num_examples)
data_provider, _ = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, examples_per_user, user_batch_size, global_model
)
return data_provider
def _create_mock_base_client_info(self, examples_per_user, user_index):
user_data = FLUserDataFromList(
[1] * examples_per_user,
MockFLModel(num_examples_per_user=examples_per_user),
)
user_info = AsyncUserSelectorInfo(user_data=user_data, user_index=user_index)
base_client = Client(
**OmegaConf.structured(self.shared_client_config),
dataset=user_info.user_data,
name=f"client_{user_info.user_index}",
)
return user_info, base_client
def test_test_async_job_scheduler(self) -> None:
num_users = 100
model_trainer = TestAsyncTrainingEventHandler()
assertEqual(len(model_trainer.num_unseen_global_model_updates), 0)
assertEqual(len(model_trainer.training_events), 0)
for user_index in range(num_users):
training_schedule = TrainingSchedule(
creation_time=0,
start_time=user_index,
end_time=num_users + (num_users - user_index - 1),
)
user_info, base_client = self._create_mock_base_client_info(
examples_per_user=1, user_index=user_index
)
client = AsyncClientDevice(
training_schedule=training_schedule,
client=base_client,
user_info=user_info,
)
model_trainer.on_training_start(client)
assertEqual(client.model_seqnum, model_trainer.current_seqnum)
assertEqual(client.model_seqnum, user_index)
model_trainer.on_training_end(client)
# each user generates two events: one TrainingFinished, and one TrainingStart
assertEqual(len(model_trainer.training_events), 2 * num_users)
# training proceeds sequentially, so num_unseen_global_model_updates should
# be a list of zeroes
assertEqual(model_trainer.num_unseen_global_model_updates, [0] * num_users)
def test_staleness_compute_in_job_scheduler(self) -> None:
# train in LIFO order: the first user to start training is the last
# one to finish training
# in this case, with n users, the observed staleness should be:
# 0 (user n), 1 (user n-1), 2 (user n-2), ..., n-1 (user 1)
# timeline:
# USERS START TRAINING
# t=0, user 0 starts training
# t=1, user 1 starts training
# ...
# t=n-1, user n-1 starts training
# USERS END TRAINING
# t=n, user n-1 ends training
# t=n+1, user n-2 ends training
# ...
# t=n+(n-1), user 0 ends training
num_users = 100
model_trainer = TestAsyncTrainingEventHandler()
clients: List[AsyncClientDevice] = []
for user_index in range(num_users):
# user i starts training at time i, ends training at time n + (n-i-1)
training_schedule = TrainingSchedule(
creation_time=0,
start_time=user_index,
end_time=num_users + (num_users - user_index - 1),
)
user_info, base_client = self._create_mock_base_client_info(
examples_per_user=1, user_index=user_index
)
client = AsyncClientDevice(
training_schedule=training_schedule,
client=base_client,
user_info=user_info,
)
model_trainer.on_training_start(client)
clients.append(client)
while len(clients):
client = clients.pop()
model_trainer.on_training_end(client)
# observed staleness should be [0, 1, 2, ....num_users-1]
assertEqual(
model_trainer.num_unseen_global_model_updates, list(range(num_users))
)
| canife-main | FLSim/flsim/common/tests/test_training_event_handler.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import List, Tuple, Type
import numpy as np
import pytest
import torch
from flsim.clients.base_client import ClientConfig
from flsim.common.pytest_helper import (
assertAlmostEqual,
assertEqual,
assertGreaterEqual,
assertLessEqual,
assertTrue,
)
from flsim.common.training_event_handler import TestAsyncTrainingEventHandler
from flsim.common.training_simulator import AsyncTrainingSimulator
from flsim.data.data_provider import FLDataProviderFromList, IFLDataProvider
from flsim.utils.async_trainer.async_user_selector import (
AsyncUserSelector,
RandomAsyncUserSelector,
RoundRobinAsyncUserSelector,
)
from flsim.utils.async_trainer.device_state import DeviceState, TrainingState
from flsim.utils.async_trainer.training_event_generator import (
AsyncTrainingEventGenerator,
AsyncTrainingEventGeneratorConfig,
AsyncTrainingEventGeneratorFromList,
AsyncTrainingEventGeneratorFromListConfig,
ConstantAsyncTrainingStartTimeDistrConfig,
EventTimingInfo,
PoissonAsyncTrainingStartTimeDistrConfig,
)
from flsim.utils.sample_model import MockFLModel
from flsim.utils.test_utils import check_inherit_logging_level
from flsim.utils.tests.helpers.test_training_simulator_utils import ( # noqa: B950 line is too long
ConstantEventGenTestConfig,
ConstantEventGenTestConfigPerUserGaussian,
PoissonEventGenTestConfig,
PoissonEventGenTestConfigPerUserGaussian,
)
from flsim.utils.timing.training_duration_distribution import (
DurationDistributionConfig,
PerExampleGaussianDurationDistributionConfig,
PerUserExponentialDurationDistributionConfig,
PerUserGaussianDurationDistributionConfig,
PerUserHalfNormalDurationDistributionConfig,
PerUserUniformDurationDistributionConfig,
)
from omegaconf import OmegaConf
class Globals:
DEVICE_STATE_IDX = 0
TRAINING_STATE_IDX = 1
@pytest.fixture(scope="class")
def prepare_training_simulator_test_utils(request) -> None:
torch.random.manual_seed(0)
request.cls.shared_client_config = ClientConfig(
epochs=1,
max_clip_norm_normalized=0,
only_federated_params=True,
random_seed=1,
store_models_and_optimizers=False,
)
@pytest.mark.usefixtures("prepare_training_simulator_test_utils")
class TestTrainingSimulatorUtils:
def random_user_selector(self, data_provider: IFLDataProvider) -> AsyncUserSelector:
return RandomAsyncUserSelector(data_provider=data_provider)
def round_robin_user_selector(
self, data_provider: IFLDataProvider
) -> AsyncUserSelector:
return RoundRobinAsyncUserSelector(data_provider=data_provider)
def _create_data_provider(self, num_users: int, examples_per_user: int):
# one_user_data has 1 batch of len = examples_per_user
one_user_data = [list(range(examples_per_user))]
data = [one_user_data] * num_users
return FLDataProviderFromList(
train_user_list=data,
eval_user_list=data,
test_user_list=data,
model=MockFLModel(num_examples_per_user=examples_per_user),
)
def _verify_event(
self,
training_event: Tuple[DeviceState, TrainingState],
exp_start_time: int,
exp_end_time: int,
exp_training_state: TrainingState,
) -> None:
assertEqual(
# pyre-fixme[16]: `TrainingState` has no attribute `training_start_time`.
training_event[Globals.DEVICE_STATE_IDX].training_schedule.start_time,
exp_start_time,
)
assertEqual(
# pyre-fixme[16]: Item `TrainingState` of `Union[DeviceState,
# TrainingState]` has no attribute `training_schedule`.
training_event[Globals.DEVICE_STATE_IDX].training_schedule.end_time,
exp_end_time,
)
assertEqual(training_event[Globals.TRAINING_STATE_IDX], exp_training_state)
def test_training_simulator(self) -> None:
"""Check that the priority queue in the training simulator works as
expected by creating a training simulator with a known list of
event start times and event durations.
"""
event_list = [
EventTimingInfo(prev_event_start_to_current_start=1, duration=3),
EventTimingInfo(prev_event_start_to_current_start=2, duration=5),
EventTimingInfo(prev_event_start_to_current_start=2, duration=1),
EventTimingInfo(prev_event_start_to_current_start=10, duration=10),
]
start_times_gaps = [val.prev_event_start_to_current_start for val in event_list]
start_times = [
sum(start_times_gaps[0 : (x + 1)]) for x in range(0, len(start_times_gaps))
]
durations = [d.duration for d in event_list]
end_times = [t[0] + t[1] for t in zip(start_times, durations)]
distr = AsyncTrainingEventGeneratorFromList(
**OmegaConf.structured(
AsyncTrainingEventGeneratorFromListConfig(training_events=event_list)
)
)
# time-sorted event list:
# event0 starts at time 1
# event1 starts at time 3
# event0 ends at time 4
# event2 starts at time 5
# event2 ends at time 6
# event1 ends at time 8
num_users = 50
job_scheduler = TestAsyncTrainingEventHandler()
data_provider = self._create_data_provider(
num_users=num_users, examples_per_user=1
)
training_sim = AsyncTrainingSimulator(
job_scheduler=job_scheduler,
user_selector=self.random_user_selector(data_provider=data_provider),
event_generator=distr,
# pyre-fixme[16]: `TestTrainingSimulatorUtils` has no attribute
# `shared_client_config`.
shared_client_config=self.shared_client_config,
num_train_end_events_per_epoch=len(event_list) - 1,
)
training_sim.run_one_epoch()
# events are stored in increasing order of ending time
seq_index = 0
event_index = 0
# 0: event0 starts at time 1
self._verify_event(
job_scheduler.training_events[seq_index],
start_times[event_index],
end_times[event_index],
TrainingState.TRAINING,
)
# 1: event1 starts at time 3
seq_index += 1
event_index = 1
self._verify_event(
job_scheduler.training_events[seq_index],
start_times[event_index],
end_times[event_index],
TrainingState.TRAINING,
)
# 2: event0 ends at time 4
seq_index += 1
event_index = 0
self._verify_event(
job_scheduler.training_events[seq_index],
start_times[event_index],
end_times[event_index],
TrainingState.TRAINING_FINISHED,
)
# 3: event2 starts time 5
seq_index += 1
event_index = 2
self._verify_event(
job_scheduler.training_events[seq_index],
start_times[event_index],
end_times[event_index],
TrainingState.TRAINING,
)
# 4: event2 ends time 6
seq_index += 1
event_index = 2
self._verify_event(
job_scheduler.training_events[seq_index],
start_times[event_index],
end_times[event_index],
TrainingState.TRAINING_FINISHED,
)
# 5: event1 ends time 8
seq_index += 1
event_index = 1
self._verify_event(
job_scheduler.training_events[seq_index],
start_times[event_index],
end_times[event_index],
TrainingState.TRAINING_FINISHED,
)
assertEqual(job_scheduler.current_seqnum, 3)
# check stats for seqnum
# global model updates are as below: {user_seqnum, global_seqnum}
# {1, 1} event0 ends
# {2, 2} event2 ends
# {1, 3} event1 ends
# seqnum_diffs = [0, 0, 2], mean = 2/3, sd = sqrt(24/27)
assertAlmostEqual(job_scheduler.seqnum_diff_mean(), 2 / 3)
assertAlmostEqual(job_scheduler.seqnum_std(), math.sqrt(24 / 27))
def test_async_stats(self) -> None:
"""Check that the priority JobQueueStats functionality in the training
simulator works as expected by creating a training simulator with a
known list of event start times and event durations.
"""
event_list = [
EventTimingInfo(prev_event_start_to_current_start=1, duration=3),
EventTimingInfo(prev_event_start_to_current_start=2, duration=5),
EventTimingInfo(prev_event_start_to_current_start=2, duration=1),
EventTimingInfo(prev_event_start_to_current_start=10, duration=10),
]
distr = AsyncTrainingEventGeneratorFromList(
**OmegaConf.structured(
AsyncTrainingEventGeneratorFromListConfig(training_events=event_list)
)
)
# time-sorted event list:
# event0 starts at time 1
# event1 starts at time 3
# event0 ends at time 4, 2 pending jobs
# event2 starts at time 5
# event2 ends at time 6, 2 pending jobs
# event1 ends at time 8, 1 pending job
num_users = 50
data_provider = self._create_data_provider(
num_users=num_users, examples_per_user=1
)
job_scheduler = TestAsyncTrainingEventHandler()
training_sim = AsyncTrainingSimulator(
job_scheduler=job_scheduler,
user_selector=self.random_user_selector(data_provider=data_provider),
event_generator=distr,
# pyre-fixme[16]: `TestTrainingSimulatorUtils` has no attribute
# `shared_client_config`.
shared_client_config=self.shared_client_config,
num_train_end_events_per_epoch=len(event_list) - 1,
)
training_sim.run_one_epoch()
avg_pending_jobs = training_sim.avg_pending_jobs()
assertTrue(avg_pending_jobs == 5 / 3)
def test_sequential_training(self) -> None:
"""Check that in sequential training (where mean and SD of training
time are zero), jobs are truly trained sequentially: i.e., if job A
starts training, job A ends training before any other job starts
training
"""
num_users = 50
training_start_time_distr = PoissonAsyncTrainingStartTimeDistrConfig(
training_rate=10
)
duration_distr = PerExampleGaussianDurationDistributionConfig(
training_duration_mean=0, training_duration_sd=0
)
distr = AsyncTrainingEventGenerator(
**OmegaConf.structured(
AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=training_start_time_distr,
duration_distribution_generator=duration_distr,
)
)
)
job_scheduler = TestAsyncTrainingEventHandler()
data_provider = self._create_data_provider(
num_users=num_users, examples_per_user=1
)
training_sim = AsyncTrainingSimulator(
job_scheduler=job_scheduler,
user_selector=self.random_user_selector(data_provider=data_provider),
event_generator=distr,
# pyre-ignore[16]: for pytest fixture
shared_client_config=self.shared_client_config,
num_train_end_events_per_epoch=num_users,
)
training_sim.run_one_epoch()
# two 'events' are generated for each job,
# and stored in job_scheduler.training_events
# one TRAINING event and one TRAINING_FINISHED event
assertEqual(len(job_scheduler.training_events), num_users * 2)
for user_num in range(num_users):
# two events for each user
user_index = 2 * user_num
assertEqual(
job_scheduler.training_events[user_index][Globals.TRAINING_STATE_IDX],
TrainingState.TRAINING,
)
assertEqual(
job_scheduler.training_events[user_index + 1][
Globals.TRAINING_STATE_IDX
],
TrainingState.TRAINING_FINISHED,
)
# verify that both events are from the same user
assertEqual(
job_scheduler.training_events[user_index][Globals.DEVICE_STATE_IDX],
job_scheduler.training_events[user_index + 1][Globals.DEVICE_STATE_IDX],
)
def test_uniform_event_gen_stats(self) -> None:
"""Test that when using an EventGenerator with uniformly distributed
time between events, and constant duration of training events,
stats about #pending_jobs and model_seqnum_diff are correct
"""
for test_config in [
ConstantEventGenTestConfig,
ConstantEventGenTestConfigPerUserGaussian,
]:
num_users = test_config.num_users
num_examples_per_user = test_config.num_examples_per_user
training_start_time_distr = ConstantAsyncTrainingStartTimeDistrConfig(
training_rate=test_config.training_rate
)
training_duration_mean = test_config.training_duration_mean
training_duration_sd = test_config.training_duration_sd
duration_distr = test_config.training_duration_distribution_config(
training_duration_mean=training_duration_mean,
training_duration_sd=training_duration_sd,
)
distr = AsyncTrainingEventGenerator(
**OmegaConf.structured(
AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=training_start_time_distr,
duration_distribution_generator=duration_distr,
)
)
)
job_scheduler = TestAsyncTrainingEventHandler()
data_provider = self._create_data_provider(
num_users=num_users, examples_per_user=num_examples_per_user
)
training_sim = AsyncTrainingSimulator(
job_scheduler=job_scheduler,
user_selector=self.random_user_selector(data_provider=data_provider),
event_generator=distr,
# pyre-ignore[16]: for pytest fixture
shared_client_config=self.shared_client_config,
num_train_end_events_per_epoch=num_users,
)
training_sim.run_one_epoch()
assertEqual(
training_sim.queue_stats.avg_pending_jobs(),
test_config.mean_pending_jobs,
)
assertEqual(job_scheduler.seqnum_diff_mean(), test_config.mean_seqnum_diff)
assertEqual(job_scheduler.seqnum_std(), test_config.sd_seqnum_diff)
def test_poisson_event_gen_stats(self) -> None:
"""Test that when using an EventGenerator with poisson distributed
time between events, and constant duration of training events,
stats about #pending_jobs and model_seqnum_diff are correct
"""
for test_config in [
PoissonEventGenTestConfig,
PoissonEventGenTestConfigPerUserGaussian,
]:
np.random.seed(1)
torch.manual_seed(1)
num_users = test_config.num_users
num_examples_per_user = test_config.num_examples_per_user
training_start_time_distr = PoissonAsyncTrainingStartTimeDistrConfig(
training_rate=test_config.training_rate
)
duration_distr = PerExampleGaussianDurationDistributionConfig(
training_duration_mean=test_config.training_duration_mean,
training_duration_sd=test_config.training_duration_sd,
)
distr = AsyncTrainingEventGenerator(
**OmegaConf.structured(
AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=training_start_time_distr,
duration_distribution_generator=duration_distr,
)
)
)
job_scheduler = TestAsyncTrainingEventHandler()
data_provider = self._create_data_provider(
num_users=num_users, examples_per_user=num_examples_per_user
)
training_sim = AsyncTrainingSimulator(
job_scheduler=job_scheduler,
user_selector=self.random_user_selector(data_provider=data_provider),
event_generator=distr,
# pyre-ignore[16]: for pytest fixture
shared_client_config=self.shared_client_config,
num_train_end_events_per_epoch=num_users,
)
training_sim.run_one_epoch()
assertAlmostEqual(
training_sim.queue_stats.avg_pending_jobs(),
test_config.mean_pending_jobs,
delta=0.50,
)
assertEqual(job_scheduler.seqnum_diff_mean(), test_config.mean_seqnum_diff)
assertAlmostEqual(
job_scheduler.seqnum_std(), test_config.sd_seqnum_diff, delta=0.001
)
@staticmethod
def get_user_finishing_seq(
total_num_users: int,
training_duration_distr_config: Type[DurationDistributionConfig],
) -> List[int]:
per_user = PerUserGaussianDurationDistributionConfig
per_example = PerExampleGaussianDurationDistributionConfig
if training_duration_distr_config == per_user:
# sequential finishing order
return list(range(total_num_users))
elif training_duration_distr_config == per_example:
# reverse finishing order
return list(reversed(range(total_num_users)))
else:
raise AssertionError(
f"Unknown type string:{training_duration_distr_config}"
)
def verify_num_examples_multiplier(
self, duration_distr_config: Type[DurationDistributionConfig]
) -> None:
"""if duration_distr_type == per-example-gaussian
training_duration = num_examples * training_duration_per_example,
else (per-user-gaussian)
training_duration = training_duration_per_example.
Use a config where training time is completely determined by the
number of examples. E.g.:
- GaussianDuration, PoissonStartTime
- training_rate: very high
- mean training time: very high
num_examples_per_user = [n, n-1, n-2, ..., 3, 2, 1]
If per-example-gaussian, the last user should finish first,
so the finishing order should be: [user n, user n-1, ..., user 2, user 1].
If per-user-gaussian, the first user should finish first,
so the finishing order should be: [user 1, user 2, user 3, ..., user n].
"""
num_users = 50
training_start_time_distr = PoissonAsyncTrainingStartTimeDistrConfig(
training_rate=1000
)
duration_distr = duration_distr_config(
training_duration_mean=1000,
training_duration_sd=0,
)
distr = AsyncTrainingEventGenerator(
**OmegaConf.structured(
AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=training_start_time_distr,
duration_distribution_generator=duration_distr,
)
)
)
num_examples_per_user = list(reversed(range(1, num_users + 1)))
data = [
[1] * num_example
for num_example, _ in zip(num_examples_per_user, range(num_users))
]
data_provider = FLDataProviderFromList(
train_user_list=data,
eval_user_list=data,
test_user_list=data,
model=MockFLModel(),
)
job_scheduler = TestAsyncTrainingEventHandler()
training_sim = AsyncTrainingSimulator(
job_scheduler=job_scheduler,
user_selector=self.round_robin_user_selector(data_provider),
event_generator=distr,
# pyre-ignore[16]: for pytest fixture
shared_client_config=self.shared_client_config,
num_train_end_events_per_epoch=num_users,
)
training_sim.run_one_epoch()
ts_idx = Globals.TRAINING_STATE_IDX
ds_idx = Globals.DEVICE_STATE_IDX
# two 'events' are generated for each job,
# and stored in job_scheduler.training_events
# one TRAINING event and one TRAINING_FINISHED event
assertEqual(len(job_scheduler.training_events), num_users * 2)
first_n_events = job_scheduler.training_events[:num_users]
# First num_user events should be TRAINING events
assertEqual(
[i[ts_idx] for i in first_n_events], [TrainingState.TRAINING] * num_users
)
# First num_user events should have user_index in sequence
assertEqual(
[i[ds_idx].user_info.user_index for i in first_n_events],
list(range(num_users)),
)
last_n_events = job_scheduler.training_events[num_users:]
# Next num_user events should be TRAINING_FINISHED events:
assertEqual(
[i[ts_idx] for i in last_n_events],
[TrainingState.TRAINING_FINISHED] * num_users,
)
# Users should finish in seq if type = PerUserGaussian,
# in reverse seq if type = PerExampleGaussian
user_finishing_seq = self.get_user_finishing_seq(
num_users, duration_distr_config
)
assertEqual(
[i[ds_idx].user_info.user_index for i in last_n_events], user_finishing_seq
)
def test_training_duration_per_example_gaussian(self) -> None:
self.verify_num_examples_multiplier(
duration_distr_config=PerExampleGaussianDurationDistributionConfig
)
def test_training_duration_per_user_gaussian(self) -> None:
self.verify_num_examples_multiplier(
duration_distr_config=PerUserGaussianDurationDistributionConfig
)
def test_training_end_training_start_relative_priority(self) -> None:
r"""
Test that when training end and training start events have the same time,
training end event happens earlier (has lower priority)
"""
def test_logging_level(self) -> None:
num_users = 50
training_start_time_distr = PoissonAsyncTrainingStartTimeDistrConfig(
training_rate=10
)
duration_distr = PerExampleGaussianDurationDistributionConfig(
training_duration_mean=0, training_duration_sd=0
)
distr = AsyncTrainingEventGenerator(
**OmegaConf.structured(
AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=training_start_time_distr,
duration_distribution_generator=duration_distr,
)
)
)
job_scheduler = TestAsyncTrainingEventHandler()
data_provider = self._create_data_provider(
num_users=num_users, examples_per_user=1
)
training_sim = AsyncTrainingSimulator(
job_scheduler=job_scheduler,
user_selector=self.random_user_selector(data_provider=data_provider),
event_generator=distr,
# pyre-fixme[16]: `TestTrainingSimulatorUtils` has no attribute
# `shared_client_config`.
shared_client_config=self.shared_client_config,
num_train_end_events_per_epoch=num_users,
)
assertTrue(check_inherit_logging_level(training_sim, 50))
assertTrue(check_inherit_logging_level(training_sim, 10))
def _training_staleness_distribution(
self,
duration_distr,
training_rate: int = 1000,
num_users: int = 10000,
examples_per_user: int = 1,
):
data_provider = self._create_data_provider(
num_users=num_users, examples_per_user=examples_per_user
)
training_start_time_distr = PoissonAsyncTrainingStartTimeDistrConfig(
training_rate=training_rate
)
distr = AsyncTrainingEventGenerator(
**OmegaConf.structured(
AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=training_start_time_distr,
duration_distribution_generator=duration_distr,
)
)
)
handler = TestAsyncTrainingEventHandler()
training_sim = AsyncTrainingSimulator(
job_scheduler=handler,
user_selector=RandomAsyncUserSelector(data_provider=data_provider),
event_generator=distr,
# pyre-fixme[16]: `TestTrainingSimulatorUtils` has no attribute
# `shared_client_config`.
shared_client_config=self.shared_client_config,
num_train_end_events_per_epoch=num_users,
)
training_sim.run_one_epoch()
return np.array(handler.num_unseen_global_model_updates)
def test_per_user_half_normal_training_duration(self) -> None:
"""
Test staleness with a half-normal training-duration distribution.
Condition 1:
Check right-skewness to make sure that the distribution is
heavy tailed and there is no unusual concentration of users with
low staleness.
Condition 2:
Check that the distribution has a long tail that approximates a Gaussian tail:
all values should be within 7 std of the mean.
"""
num_users = 1000
training_rate = 10
training_min = 0.00
training_std = 1.5
duration_distr_config = PerUserHalfNormalDurationDistributionConfig(
training_duration_sd=training_std, training_duration_min=training_min
)
staleness = self._training_staleness_distribution(
duration_distr_config, training_rate=training_rate, num_users=num_users
)
mean = np.mean(staleness)
sd = np.std(staleness)
assertLessEqual(np.median(staleness), mean)
# check that not all values are within 2 sd
assertGreaterEqual(max(staleness), mean + 2 * sd)
# check all values are within 7 sd of the mean
        # Gaussian tail bound: P(X > mean + 7*sd) ~ exp(-7^2 / 2) ~= 2e-11
assertLessEqual(max(staleness), mean + 7 * sd)
def test_per_user_uniform_training_duration(self) -> None:
num_users = 2000
training_rate = 10
training_min = 0.0
training_mean = 1.0
duration_distr_config = PerUserUniformDurationDistributionConfig(
training_duration_mean=training_mean, training_duration_min=training_min
)
staleness = self._training_staleness_distribution(
duration_distr_config, training_rate=training_rate, num_users=num_users
)
mean = np.mean(staleness)
sd = np.std(staleness)
        # check that under the uniform distribution the mean and median are roughly equal
assertAlmostEqual(np.median(staleness), mean, delta=1.5)
# check that all values are within 4 sd from the mean
assertLessEqual(max(staleness), mean + 4 * sd)
assertGreaterEqual(min(staleness), mean - 4 * sd)
def test_per_user_exponential_training_duration(self) -> None:
for training_mean in [0.5, 1, 2]:
num_users = 2000
training_rate = 10
duration_distr_config = PerUserExponentialDurationDistributionConfig(
training_duration_mean=training_mean
)
staleness = self._training_staleness_distribution(
duration_distr_config, training_rate=training_rate, num_users=num_users
)
mean = np.mean(staleness)
median = np.median(staleness)
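            # Rough reasoning behind the expected value below (a sketch, not part of
            # the original code): with Poisson training starts at `training_rate` per
            # unit time and exponential training durations with mean `training_mean`,
            # about training_rate * training_mean other users finish (and update the
            # global model) while one user trains, so the expected staleness is their product.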
expected_mean = training_rate * training_mean
assertAlmostEqual(expected_mean, mean, delta=1.5)
assertLessEqual(median, mean)
| canife-main | FLSim/flsim/common/tests/test_training_simulator.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# The base hydra configs for different components of FLSim are defined
# in their respective packages' __init__.py files.
# Importing the trainer module executes flsim.trainers' __init__, which
# triggers a chain reaction that imports all of the config store
# definitions. Similarly, importing the data module imports all necessary
# dataloader configs.
import flsim.data # noqa
import flsim.trainers # noqa
| canife-main | FLSim/flsim/configs/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/configs/tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import flsim.configs # noqa
from flsim.common.pytest_helper import assertRaises
from flsim.utils.sample_model import DummyAlphabetFLModel
from hydra.errors import InstantiationException
from hydra.experimental import compose, initialize
from hydra.utils import instantiate
class TestConfigValidation:
def test_throw_exception_on_missing_field(self) -> None:
with initialize(config_path=None):
cfg = compose(
config_name=None,
overrides=[
"+trainer=base_async_trainer",
# ThresholdStalenessWeightConfig has MISSING fields.
# Hydra should throw an exception at instantiation time
# since we won't be setting those fields here.
"[email protected]_weight.staleness_weight=base_threshold_staleness_weight",
],
)
with assertRaises(
InstantiationException, # with Hydra 1.1
):
instantiate(cfg.trainer, model=DummyAlphabetFLModel(), cuda_enabled=False)
| canife-main | FLSim/flsim/configs/tests/test_config_validation.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import copy
from dataclasses import dataclass
from typing import List, Tuple
import torch
from flsim.active_user_selectors.simple_user_selector import (
ActiveUserSelector,
ActiveUserSelectorConfig,
)
from flsim.common.diversity_metrics import (
DiversityMetrics,
DiversityMetricType,
DiversityStatistics,
)
from flsim.data.data_provider import IFLDataProvider
from flsim.interfaces.model import IFLModel
from flsim.utils.config_utils import fullclassname, init_self_cfg
class DiverseUserSelectorUtils:
@staticmethod
def calculate_diversity_metrics(
data_provider: IFLDataProvider,
global_model: IFLModel,
user_indices: List[int],
loss_reduction_type: str = "mean",
client_gradient_scaling: str = "sum",
diversity_metric_type: DiversityMetricType = DiversityMetricType.gradient_diversity,
) -> DiversityMetrics:
"""Calculates the Gradient Diversity of a cohort -- see arXiv:1706.05699
        For the FL application, define an aggregate gradient for each user, which is the sum of
the gradient of the loss function for all the user's data samples (denoted grad f_i).
We compute two quantities: norm_of_sum is the norm of the sum of these user
gradients, and sum_of_norms is the sum of the norms of these gradients. These quantities
are passed to a DiversityMetrics object which computes the gradient diversity metrics.
norm_of_sum = || sum grad f_i ||_2^2
sum_of_norms = sum || grad f_i ||_2^2
loss_reduction_type should match the loss reduction type of the model (pytorch defaults to mean)
client_gradient_scaling determines how the gradient for each client is computed. Either
the gradient is summed over all the user's data, or it is the average client gradient.
"""
if loss_reduction_type not in {"sum", "mean"}:
raise ValueError('loss_reduction_type must be "sum" or "mean"')
if client_gradient_scaling not in {"sum", "mean"}:
raise ValueError('client_gradient_scaling must be "sum" or "mean"')
global_model.fl_get_module().train()
params = list(global_model.fl_get_module().parameters())
def zero_like_grad(param_list: List[torch.Tensor]) -> List[torch.Tensor]:
zero_gradient = []
for group in param_list:
if group.requires_grad:
zero_gradient.append(torch.zeros_like(group, requires_grad=False))
return zero_gradient
gradient_sum = zero_like_grad(params) # pyre-ignore
norm_of_sum, sum_of_norms = 0.0, 0.0
# Accumulate the gradient over a user's batches, normalized by batch size
for user_idx in user_indices:
user_data = data_provider.get_train_user(user_idx)
accumulated_gradient = zero_like_grad(params) # pyre-ignore
for batch in user_data.train_data():
global_model.fl_get_module().zero_grad()
batch_metrics = global_model.fl_forward(batch)
batch_metrics.loss.backward()
for group_in, group_out in zip(params, accumulated_gradient):
if loss_reduction_type == "mean":
group_out += group_in.grad * batch_metrics.num_examples
else:
group_out += group_in.grad
for group_in, group_out in zip(accumulated_gradient, gradient_sum):
if client_gradient_scaling == "mean":
group_in = group_in / user_data.num_train_examples()
group_out += group_in
sum_of_norms += torch.sum(group_in * group_in).item()
for group in gradient_sum:
norm_of_sum += torch.sum(group * group).item()
return DiversityMetrics(
norm_of_sum=norm_of_sum,
sum_of_norms=sum_of_norms,
diversity_metric_type=diversity_metric_type,
)
@staticmethod
def select_diverse_cohort(
data_provider: IFLDataProvider,
global_model: IFLModel,
users_per_round: int,
available_users: List[int],
rng: torch.Generator,
num_search_samples: int,
maximize_metric: bool,
loss_reduction_type: str = "mean",
client_gradient_scaling: str = "sum",
diversity_metric_type: DiversityMetricType = DiversityMetricType.gradient_diversity,
) -> Tuple[List[int], DiversityStatistics]:
"""Choose a cohort which optimizes the diversity metric type out of some number
of sample cohorts. Calculate the max, min, and average metric of the sample cohorts.
Inputs:
maximize_metric: whether the diversity metric is maximized (True) or minimized (False)
Outputs:
            Cohort (out of the sample cohorts) which optimizes the metric
Statistics of the metric of interest of the cohort.
"""
if users_per_round >= len(available_users):
candidate_selected_indices = copy.copy(available_users)
candidate_metrics = DiverseUserSelectorUtils.calculate_diversity_metrics(
data_provider=data_provider,
global_model=global_model,
user_indices=candidate_selected_indices,
loss_reduction_type=loss_reduction_type,
client_gradient_scaling=client_gradient_scaling,
diversity_metric_type=diversity_metric_type,
)
cohort_stats = DiversityStatistics([candidate_metrics])
return candidate_selected_indices, cohort_stats
# Choose cohorts from the available users and calculate their diversity statistics.
selected_indices = torch.multinomial(
torch.ones(len(available_users), dtype=torch.float),
users_per_round,
replacement=False,
generator=rng,
).tolist()
candidate_user_indices = [available_users[idx] for idx in selected_indices]
candidate_diversity_metrics = (
DiverseUserSelectorUtils.calculate_diversity_metrics(
data_provider=data_provider,
global_model=global_model,
user_indices=candidate_user_indices,
loss_reduction_type=loss_reduction_type,
client_gradient_scaling=client_gradient_scaling,
diversity_metric_type=diversity_metric_type,
)
)
sample_cohort_metrics = [candidate_diversity_metrics]
for _ in range(num_search_samples - 1):
selected_indices = torch.multinomial(
torch.ones(len(available_users), dtype=torch.float),
users_per_round,
replacement=False,
generator=rng,
).tolist()
user_indices = [available_users[idx] for idx in selected_indices]
cohort_diversity_metrics = (
DiverseUserSelectorUtils.calculate_diversity_metrics(
data_provider=data_provider,
global_model=global_model,
user_indices=user_indices,
loss_reduction_type=loss_reduction_type,
client_gradient_scaling=client_gradient_scaling,
diversity_metric_type=diversity_metric_type,
)
)
sample_cohort_metrics.append(cohort_diversity_metrics)
if maximize_metric and (
cohort_diversity_metrics > candidate_diversity_metrics
):
candidate_user_indices = user_indices
candidate_diversity_metrics = cohort_diversity_metrics
elif (not maximize_metric) and (
cohort_diversity_metrics < candidate_diversity_metrics
):
candidate_user_indices = user_indices
candidate_diversity_metrics = cohort_diversity_metrics
cohort_stats = DiversityStatistics(sample_cohort_metrics)
return candidate_user_indices, cohort_stats
class DiversityReportingUserSelector(ActiveUserSelector):
"""User Selector which chooses users uniformly randomly and records
the Gradient Diversity (GD) of the cohort. For GD, see arXiv:1706.05699.
If constant_cohorts is set to True, choose a set of cohorts at the start of
training and at each round, report the GD of each of those sample cohorts.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=DiversityReportingUserSelectorConfig,
**kwargs,
)
super().__init__(**kwargs)
if not isinstance(self.cfg.diversity_metric_type, DiversityMetricType):
raise ValueError("diversity_metric_type must be of DiversityMetricType")
if self.cfg.loss_reduction_type not in {"sum", "mean"}:
raise ValueError('loss_reduction_type must be "sum" or "mean"')
if self.cfg.client_gradient_scaling not in {"sum", "mean"}:
raise ValueError('client_gradient_scaling must be "sum" or "mean"')
self.sample_cohorts = []
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def get_user_indices(self, **kwargs) -> List[int]:
required_inputs = [
"num_total_users",
"users_per_round",
"data_provider",
"global_model",
"epoch",
]
(
num_total_users,
users_per_round,
data_provider,
global_model,
epoch,
) = self.unpack_required_inputs(required_inputs, kwargs)
selected_indices = torch.multinomial(
torch.ones(num_total_users, dtype=torch.float),
users_per_round,
replacement=False,
generator=self.rng,
).tolist()
currently_active = (
# pyre-fixme[16]: `DiversityReportingUserSelector` has no attribute `cfg`.
epoch >= self.cfg.epochs_before_active
and epoch < self.cfg.epochs_before_active + self.cfg.num_epochs_active
)
if not currently_active:
return selected_indices
if len(self.sample_cohorts) == 0 or not self.cfg.constant_cohorts:
self.sample_cohorts = []
for _ in range(self.cfg.num_candidate_cohorts):
sample_cohort_indices = torch.multinomial(
torch.ones(num_total_users, dtype=torch.float),
users_per_round,
replacement=False,
generator=self.rng,
).tolist()
self.sample_cohorts.append(sample_cohort_indices)
diversity_metrics_list = []
for sample_cohort in self.sample_cohorts:
diversity_metrics_list.append(
DiverseUserSelectorUtils.calculate_diversity_metrics(
data_provider=data_provider,
global_model=global_model,
user_indices=sample_cohort,
loss_reduction_type=self.cfg.loss_reduction_type,
client_gradient_scaling=self.cfg.client_gradient_scaling,
diversity_metric_type=self.cfg.diversity_metric_type,
)
)
return selected_indices
class DiversityStatisticsReportingUserSelector(ActiveUserSelector):
"""Find the statistics of the diversity metric of interest.
The cohort is selected uniformly randomly with replacement.
Record the sample maximum, average, and minimum metric for:
1) cohorts chosen with replacement
2) cohorts chosen without replacement (round-robin), where the cohort
with the maximum metric is removed from the list of available users.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=DiversityStatisticsReportingUserSelectorConfig,
**kwargs,
)
super().__init__(**kwargs)
if not isinstance(self.cfg.diversity_metric_type, DiversityMetricType):
raise ValueError("diversity_metric_type must be of DiversityMetricType")
if self.cfg.loss_reduction_type not in {"sum", "mean"}:
raise ValueError('loss_reduction_type must be "sum" or "mean"')
if self.cfg.client_gradient_scaling not in {"sum", "mean"}:
raise ValueError('client_gradient_scaling must be "sum" or "mean"')
self.available_users = []
self.cohort_stats_with_replacement = []
self.cohort_stats_without_replacement = []
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def get_user_indices(self, **kwargs) -> List[int]:
required_inputs = [
"num_total_users",
"users_per_round",
"data_provider",
"global_model",
"epoch",
]
(
num_total_users,
users_per_round,
data_provider,
global_model,
epoch,
) = self.unpack_required_inputs(required_inputs, kwargs)
if len(self.available_users) == 0:
self.available_users = list(range(num_total_users))
# First uniformly randomly select the actual cohort used for training
baseline_selected_indices = torch.multinomial(
torch.ones(num_total_users, dtype=torch.float),
users_per_round,
replacement=False,
generator=self.rng,
).tolist()
if (
# pyre-fixme[16]: `DiversityStatisticsReportingUserSelector` has no
# attribute `cfg`.
epoch < self.cfg.epochs_before_active
or epoch >= self.cfg.epochs_before_active + self.cfg.num_epochs_active
):
return baseline_selected_indices
(_, stat_with_replacement) = DiverseUserSelectorUtils.select_diverse_cohort(
data_provider=data_provider,
global_model=global_model,
users_per_round=users_per_round,
available_users=list(range(num_total_users)),
rng=self.rng,
num_search_samples=self.cfg.num_candidate_cohorts,
maximize_metric=self.cfg.maximize_metric,
loss_reduction_type=self.cfg.loss_reduction_type,
client_gradient_scaling=self.cfg.client_gradient_scaling,
diversity_metric_type=self.cfg.diversity_metric_type,
)
(
candidate_user_indices,
stat_without_replacement,
) = DiverseUserSelectorUtils.select_diverse_cohort(
data_provider=data_provider,
global_model=global_model,
users_per_round=users_per_round,
available_users=self.available_users,
rng=self.rng,
num_search_samples=self.cfg.num_candidate_cohorts,
maximize_metric=self.cfg.maximize_metric,
loss_reduction_type=self.cfg.loss_reduction_type,
client_gradient_scaling=self.cfg.client_gradient_scaling,
diversity_metric_type=self.cfg.diversity_metric_type,
)
self.cohort_stats_with_replacement.append(stat_with_replacement)
self.cohort_stats_without_replacement.append(stat_without_replacement)
# Update the list of available users for round-robin selector
self.available_users = [
idx for idx in self.available_users if idx not in candidate_user_indices
]
return baseline_selected_indices
class DiversityMaximizingUserSelector(ActiveUserSelector):
"""A user selector class which chooses cohorts to greedily maximize their Diversity.
Config options:
        num_candidate_cohorts: The number of candidate cohorts considered; the cohort with the
            best Diversity among them is selected.
        loss_reduction_type: This should match the loss reduction type of the learning model used.
        client_gradient_scaling: How to compute the gradient summary of a client -- whether it should
            be the sum or the average of the gradients of its data.
        diversity_metric_type: The Diversity Metric which is being optimized.
maximize_metric: Whether to maximize (True) or minimize (False) the specified diversity metric
with_replacement: If false, choose cohorts in a round-robin fashion. If true, select each cohort
from all clients at each round.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=DiversityMaximizingUserSelectorConfig,
**kwargs,
)
super().__init__(**kwargs)
if not isinstance(self.cfg.diversity_metric_type, DiversityMetricType):
raise ValueError("diversity_metric_type must be of DiversityMetricType")
if self.cfg.loss_reduction_type not in {"sum", "mean"}:
raise ValueError('loss_reduction_type must be "sum" or "mean"')
if self.cfg.client_gradient_scaling not in {"sum", "mean"}:
raise ValueError('client_gradient_scaling must be "sum" or "mean"')
self.available_users = []
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def get_user_indices(self, **kwargs) -> List[int]:
required_inputs = [
"num_total_users",
"users_per_round",
"data_provider",
"global_model",
"epoch",
]
(
num_total_users,
users_per_round,
data_provider,
global_model,
epoch,
) = self.unpack_required_inputs(required_inputs, kwargs)
if len(self.available_users) == 0:
self.available_users = list(range(num_total_users))
if (
# pyre-fixme[16]: `DiversityMaximizingUserSelector` has no attribute `cfg`.
epoch < self.cfg.epochs_before_active
or epoch >= self.cfg.epochs_before_active + self.cfg.num_epochs_active
):
baseline_selected_indices = torch.multinomial(
torch.ones(num_total_users, dtype=torch.float),
users_per_round,
replacement=False,
generator=self.rng,
).tolist()
return baseline_selected_indices
(candidate_user_indices, _,) = DiverseUserSelectorUtils.select_diverse_cohort(
data_provider=data_provider,
global_model=global_model,
users_per_round=users_per_round,
available_users=self.available_users,
rng=self.rng,
num_search_samples=self.cfg.num_candidate_cohorts,
maximize_metric=self.cfg.maximize_metric,
loss_reduction_type=self.cfg.loss_reduction_type,
client_gradient_scaling=self.cfg.client_gradient_scaling,
diversity_metric_type=self.cfg.diversity_metric_type,
)
if not self.cfg.with_replacement:
self.available_users = [
idx for idx in self.available_users if idx not in candidate_user_indices
]
return candidate_user_indices
@dataclass
class DiversityReportingUserSelectorConfig(ActiveUserSelectorConfig):
_target_: str = fullclassname(DiversityReportingUserSelector)
diversity_metric_type: DiversityMetricType = DiversityMetricType.gradient_diversity
epochs_before_active: int = 0
num_epochs_active: int = int(10e8)
num_candidate_cohorts: int = 10
loss_reduction_type: str = "mean"
client_gradient_scaling: str = "sum"
constant_cohorts: bool = False
@dataclass
class DiversityStatisticsReportingUserSelectorConfig(ActiveUserSelectorConfig):
_target_: str = fullclassname(DiversityStatisticsReportingUserSelector)
diversity_metric_type: DiversityMetricType = DiversityMetricType.gradient_diversity
maximize_metric: bool = True
epochs_before_active: int = 0
num_epochs_active: int = int(10e8)
num_candidate_cohorts: int = 10
loss_reduction_type: str = "mean"
client_gradient_scaling: str = "sum"
@dataclass
class DiversityMaximizingUserSelectorConfig(ActiveUserSelectorConfig):
_target_: str = fullclassname(DiversityMaximizingUserSelector)
diversity_metric_type: DiversityMetricType = DiversityMetricType.gradient_diversity
maximize_metric: bool = True
epochs_before_active: int = 0
num_epochs_active: int = int(10e8)
num_candidate_cohorts: int = 10
loss_reduction_type: str = "mean"
client_gradient_scaling: str = "sum"
with_replacement: bool = False
| canife-main | FLSim/flsim/active_user_selectors/diverse_user_selector.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore # @manual
from .diverse_user_selector import (
DiversityMaximizingUserSelectorConfig,
DiversityReportingUserSelectorConfig,
DiversityStatisticsReportingUserSelectorConfig,
)
from .simple_user_selector import (
HighLossActiveUserSelectorConfig,
NumberOfSamplesActiveUserSelectorConfig,
RandomRoundRobinActiveUserSelectorConfig,
SequentialActiveUserSelectorConfig,
UniformlyRandomActiveUserSelectorConfig,
)
ConfigStore.instance().store(
name="base_uniformly_random_active_user_selector",
node=UniformlyRandomActiveUserSelectorConfig,
group="active_user_selector",
)
ConfigStore.instance().store(
name="base_sequential_active_user_selector",
node=SequentialActiveUserSelectorConfig,
group="active_user_selector",
)
ConfigStore.instance().store(
name="base_random_round_robin_active_user_selector",
node=RandomRoundRobinActiveUserSelectorConfig,
group="active_user_selector",
)
ConfigStore.instance().store(
name="base_number_of_samples_active_user_selector",
node=NumberOfSamplesActiveUserSelectorConfig,
group="active_user_selector",
)
ConfigStore.instance().store(
name="base_high_loss_active_user_selector",
node=HighLossActiveUserSelectorConfig,
group="active_user_selector",
)
ConfigStore.instance().store(
name="base_diversity_reporting_user_selector",
node=DiversityReportingUserSelectorConfig,
group="active_user_selector",
)
ConfigStore.instance().store(
name="base_diversity_statistics_reporting_user_selector",
node=DiversityStatisticsReportingUserSelectorConfig,
group="active_user_selector",
)
ConfigStore.instance().store(
name="base_diversity_maximizing_user_selector",
node=DiversityMaximizingUserSelectorConfig,
group="active_user_selector",
)
| canife-main | FLSim/flsim/active_user_selectors/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import copy
import math
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
import torch
from flsim.data.data_provider import IFLDataProvider, IFLUserData
from flsim.interfaces.model import IFLModel
from flsim.utils.config_utils import fullclassname, init_self_cfg
from omegaconf import MISSING
class ActiveUserSelectorUtils:
@staticmethod
def convert_to_probability(
user_utility: torch.Tensor,
fraction_with_zero_prob: float,
softmax_temperature: float,
weights=None,
) -> torch.Tensor:
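        # Illustrative behaviour (values taken from the unit tests): for
        # user_utility = [1, 1, 1, 2, 2] with softmax_temperature=1 and
        # fraction_with_zero_prob=0, the result is proportional to exp(utility);
        # with fraction_with_zero_prob=0.5, the floor(0.5 * 5) = 2 lowest-utility
        # users get probability 0, and with softmax_temperature=0 the output is
        # uniform over the remaining users.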
if weights is None:
weights = torch.ones(len(user_utility), dtype=torch.float)
num_to_zero_out = math.floor(fraction_with_zero_prob * len(user_utility))
sorted_indices = torch.argsort(user_utility, descending=True).tolist()
unnormalized_probs = torch.exp(softmax_temperature * user_utility) * weights
if num_to_zero_out > 0:
for i in sorted_indices[-num_to_zero_out:]:
unnormalized_probs[i] = 0
tmp_sum = sum(unnormalized_probs.tolist())
assert tmp_sum > 0
normalized_probs = unnormalized_probs / tmp_sum
return normalized_probs
@staticmethod
def normalize_by_sample_count(
user_utility: torch.Tensor,
user_sample_counts: torch.Tensor,
averaging_exponent: float,
) -> torch.Tensor:
sample_averaging_weights = 1 / torch.pow(user_sample_counts, averaging_exponent)
user_utility = sample_averaging_weights * user_utility
return user_utility
@staticmethod
def samples_per_user(data_provider: IFLDataProvider) -> torch.Tensor:
samples_per_user = [
data_provider.get_train_user(u).num_train_examples()
for u in data_provider.train_user_ids()
]
samples_per_user = torch.tensor(samples_per_user, dtype=torch.float)
return samples_per_user
@staticmethod
def select_users(
users_per_round: int,
probs: torch.Tensor,
fraction_uniformly_random: float,
rng: Any,
) -> List[int]:
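        # Illustrative split (hypothetical numbers): with users_per_round=4 and
        # fraction_uniformly_random=0.5, floor(4 * 0.5) = 2 users are drawn
        # uniformly at random and the remaining 2 are drawn according to `probs`;
        # the two draws never overlap because actively selected users get zero
        # weight in the uniform draw.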
num_total_users = len(probs)
num_randomly_selected = math.floor(users_per_round * fraction_uniformly_random)
num_actively_selected = users_per_round - num_randomly_selected
assert len(torch.nonzero(probs)) >= num_actively_selected
if num_actively_selected > 0:
actively_selected_indices = torch.multinomial(
probs, num_actively_selected, replacement=False, generator=rng
).tolist()
else:
actively_selected_indices = []
if num_randomly_selected > 0:
tmp_probs = torch.tensor(
[
0 if x in actively_selected_indices else 1
for x in range(num_total_users)
],
dtype=torch.float,
)
randomly_selected_indices = torch.multinomial(
tmp_probs, num_randomly_selected, replacement=False, generator=rng
).tolist()
else:
randomly_selected_indices = []
selected_indices = actively_selected_indices + randomly_selected_indices
return selected_indices
@staticmethod
def sample_available_users(
users_per_round: int, available_users: List[int], rng: torch.Generator
) -> List[int]:
if users_per_round >= len(available_users):
return copy.copy(available_users)
selected_indices = torch.multinomial(
torch.ones(len(available_users), dtype=torch.float),
users_per_round,
replacement=False,
generator=rng,
).tolist()
return [available_users[idx] for idx in selected_indices]
class ActiveUserSelector(abc.ABC):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=ActiveUserSelectorConfig,
**kwargs,
)
self.rng = torch.Generator()
if self.cfg.user_selector_seed is not None:
self.rng = self.rng.manual_seed(self.cfg.user_selector_seed)
else:
self.rng.seed()
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@abc.abstractmethod
def get_user_indices(self, **kwargs) -> List[int]:
pass
def get_users_unif_rand(
self, num_total_users: int, users_per_round: int
) -> List[int]:
selected_indices = torch.multinomial(
torch.ones(num_total_users, dtype=torch.float),
users_per_round,
replacement=False,
generator=self.rng,
).tolist()
return selected_indices
def unpack_required_inputs(
self, required_inputs: List[str], kwargs: Dict[str, Any]
) -> List[Any]:
inputs = []
for key in required_inputs:
input = kwargs.get(key, None)
assert (
input is not None
), "Input `{}` is required for get_user_indices in active_user_selector {}.".format(
key, self.__class__.__name__
)
inputs.append(input)
return inputs
class UniformlyRandomActiveUserSelector(ActiveUserSelector):
"""Simple User Selector which does random sampling of users"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=UniformlyRandomActiveUserSelectorConfig,
**kwargs,
)
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def get_user_indices(self, **kwargs) -> List[int]:
required_inputs = ["num_total_users", "users_per_round"]
num_total_users, users_per_round = self.unpack_required_inputs(
required_inputs, kwargs
)
selected_indices = torch.multinomial(
torch.ones(num_total_users, dtype=torch.float),
users_per_round,
# pyre-fixme[16]: `UniformlyRandomActiveUserSelector` has no attribute
# `cfg`.
replacement=self.cfg.random_with_replacement,
generator=self.rng,
).tolist()
return selected_indices
class SequentialActiveUserSelector(ActiveUserSelector):
"""Simple User Selector which chooses users in sequential manner.
e.g. if 2 users (user0 and user1) were trained in the previous round,
the next 2 users (user2 and user3) will be picked in the current round.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=SequentialActiveUserSelectorConfig,
**kwargs,
)
super().__init__(**kwargs)
self.cur_round_user_index = 0
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def get_user_indices(self, **kwargs) -> List[int]:
required_inputs = ["num_total_users", "users_per_round"]
num_total_users, users_per_round = self.unpack_required_inputs(
required_inputs, kwargs
)
# when having covered all the users, return the cursor to 0
if num_total_users <= self.cur_round_user_index:
self.cur_round_user_index = 0
next_round_user_index = self.cur_round_user_index + users_per_round
user_indices = list(
range(
self.cur_round_user_index, min(next_round_user_index, num_total_users)
)
)
self.cur_round_user_index = next_round_user_index
return user_indices
class RandomRoundRobinActiveUserSelector(ActiveUserSelector):
"""User Selector which chooses users randomly in a round-robin fashion.
Each round users are selected uniformly randomly from the users not
yet selected in that epoch.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=RandomRoundRobinActiveUserSelectorConfig,
**kwargs,
)
super().__init__(**kwargs)
self.available_users = []
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def get_user_indices(self, **kwargs) -> List[int]:
required_inputs = ["num_total_users", "users_per_round"]
num_total_users, users_per_round = self.unpack_required_inputs(
required_inputs, kwargs
)
        # once all users have been covered, reset the list of available users
if len(self.available_users) == 0:
self.available_users = list(range(num_total_users))
user_indices = ActiveUserSelectorUtils.sample_available_users(
users_per_round, self.available_users, self.rng
)
# Update the list of available users
# TODO(dlazar): ensure this is the fastest method. If not, write a util
self.available_users = [
idx for idx in self.available_users if idx not in user_indices
]
return user_indices
class NumberOfSamplesActiveUserSelector(ActiveUserSelector):
"""Active User Selector which chooses users with probability weights determined
by the number of training points on the client. Clients with more samples are
given higher probability of being selected. Assumes that number of samples
on each client is known by the server and updated constantly.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=NumberOfSamplesActiveUserSelectorConfig,
**kwargs,
)
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def get_user_indices(self, **kwargs) -> List[int]:
required_inputs = ["users_per_round", "data_provider"]
users_per_round, data_provider = self.unpack_required_inputs(
required_inputs, kwargs
)
user_utility = ActiveUserSelectorUtils.samples_per_user(data_provider)
# pyre-fixme[16]: `NumberOfSamplesActiveUserSelector` has no attribute `cfg`.
user_utility = torch.pow(user_utility, self.cfg.exponent)
probs = ActiveUserSelectorUtils.convert_to_probability(
user_utility=torch.log(user_utility),
fraction_with_zero_prob=self.cfg.fraction_with_zero_prob,
softmax_temperature=1,
)
selected_indices = ActiveUserSelectorUtils.select_users(
users_per_round=users_per_round,
probs=probs,
fraction_uniformly_random=self.cfg.fraction_uniformly_random,
rng=self.rng,
)
return selected_indices
class HighLossActiveUserSelector(ActiveUserSelector):
"""Active User Selector which chooses users with probability weights determined
by the loss the model suffers on the user's data. Since this is a function of
both the data on the client and the model, user loss values are not updated
for every user before a selection round. Instead the class will keep a record
of the loss and only update the value for a user when they are used for training
since in this case the model is being transmitted to the user anyway.
    If this staleness becomes a significant problem, try using ideas from
non-stationary UCB: https://arxiv.org/pdf/0805.3415.pdf.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=HighLossActiveUserSelectorConfig,
**kwargs,
)
super().__init__(**kwargs)
self.user_losses: torch.Tensor = torch.tensor([], dtype=torch.float)
self.user_sample_counts: torch.Tensor = torch.tensor([], dtype=torch.float)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _get_initial_losses_and_counts(
self,
num_total_users: int,
data_provider: IFLDataProvider,
global_model: IFLModel,
) -> Tuple[torch.Tensor, torch.Tensor]:
user_losses = torch.zeros(num_total_users, dtype=torch.float)
user_sample_counts = torch.zeros(num_total_users, dtype=torch.float)
for i in range(num_total_users):
(
user_losses[i],
user_sample_counts[i],
) = self._get_user_loss_and_sample_count(
data_provider.get_train_user(i), global_model
)
return user_losses, user_sample_counts
def _non_active_sampling(
self, num_total_users: int, users_per_round: int
) -> List[int]:
selected_indices = torch.multinomial(
torch.ones(num_total_users, dtype=torch.float),
users_per_round,
replacement=False,
generator=self.rng,
).tolist()
return selected_indices
@staticmethod
def _get_user_loss_and_sample_count(
user_data: IFLUserData, model: IFLModel
) -> Tuple[float, int]:
loss = 0
num_samples = 0
for batch in user_data.train_data():
metrics = model.get_eval_metrics(batch)
loss += metrics.loss.item() * metrics.num_examples
num_samples += metrics.num_examples
return loss, num_samples
def get_user_indices(self, **kwargs) -> List[int]:
required_inputs = [
"num_total_users",
"users_per_round",
"data_provider",
"global_model",
"epoch",
]
(
num_total_users,
users_per_round,
data_provider,
global_model,
epoch,
) = self.unpack_required_inputs(required_inputs, kwargs)
# pyre-fixme[16]: `HighLossActiveUserSelector` has no attribute `cfg`.
if epoch < self.cfg.epochs_before_active:
selected_indices = self._non_active_sampling(
num_total_users, users_per_round
)
return selected_indices
if self.user_losses.nelement() == 0:
(
self.user_losses,
self.user_sample_counts,
) = self._get_initial_losses_and_counts(
num_total_users, data_provider, global_model
)
user_utility = ActiveUserSelectorUtils.normalize_by_sample_count(
user_utility=self.user_losses,
user_sample_counts=self.user_sample_counts,
averaging_exponent=self.cfg.count_normalization_exponent,
)
probs = ActiveUserSelectorUtils.convert_to_probability(
user_utility=user_utility,
fraction_with_zero_prob=self.cfg.fraction_with_zero_prob,
softmax_temperature=self.cfg.softmax_temperature,
)
selected_indices = ActiveUserSelectorUtils.select_users(
users_per_round=users_per_round,
probs=probs,
fraction_uniformly_random=self.cfg.fraction_uniformly_random,
rng=self.rng,
)
for i in selected_indices:
(
self.user_losses[i],
self.user_sample_counts[i],
) = self._get_user_loss_and_sample_count(
data_provider.get_train_user(i), global_model
)
return selected_indices
@dataclass
class ActiveUserSelectorConfig:
_target_: str = MISSING
_recursive_: bool = False
user_selector_seed: Optional[int] = None
@dataclass
class UniformlyRandomActiveUserSelectorConfig(ActiveUserSelectorConfig):
_target_: str = fullclassname(UniformlyRandomActiveUserSelector)
random_with_replacement: bool = False
@dataclass
class SequentialActiveUserSelectorConfig(ActiveUserSelectorConfig):
_target_: str = fullclassname(SequentialActiveUserSelector)
@dataclass
class RandomRoundRobinActiveUserSelectorConfig(ActiveUserSelectorConfig):
_target_: str = fullclassname(RandomRoundRobinActiveUserSelector)
@dataclass
class NumberOfSamplesActiveUserSelectorConfig(ActiveUserSelectorConfig):
_target_: str = fullclassname(NumberOfSamplesActiveUserSelector)
exponent: float = 1.0
fraction_uniformly_random: float = 0.0
fraction_with_zero_prob: float = 0.0
@dataclass
class HighLossActiveUserSelectorConfig(ActiveUserSelectorConfig):
_target_: str = fullclassname(HighLossActiveUserSelector)
count_normalization_exponent: float = 0.0
epochs_before_active: int = 0
fraction_uniformly_random: float = 0.0
fraction_with_zero_prob: float = 0.0
softmax_temperature: float = 1.0
| canife-main | FLSim/flsim/active_user_selectors/simple_user_selector.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/active_user_selectors/tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import Counter
import torch
from flsim.active_user_selectors.simple_user_selector import (
ActiveUserSelectorUtils,
HighLossActiveUserSelector,
HighLossActiveUserSelectorConfig,
NumberOfSamplesActiveUserSelector,
NumberOfSamplesActiveUserSelectorConfig,
RandomRoundRobinActiveUserSelector,
RandomRoundRobinActiveUserSelectorConfig,
SequentialActiveUserSelector,
SequentialActiveUserSelectorConfig,
UniformlyRandomActiveUserSelector,
UniformlyRandomActiveUserSelectorConfig,
)
from flsim.common.pytest_helper import (
assertEqual,
assertIsInstance,
assertRaises,
assertTrue,
)
from flsim.utils.sample_model import DummyAlphabetFLModel
from flsim.utils.tests.helpers.test_data_utils import DummyAlphabetDataset
from hydra.utils import instantiate
class TestActiveUserSelector:
def test_uniformly_random_user_selection(self) -> None:
# Test the uniformly random user selection in the ActiveUserSelector base class
null_selector = instantiate(SequentialActiveUserSelectorConfig())
assertIsInstance(null_selector, SequentialActiveUserSelector)
users = tuple(range(5))
torch.manual_seed(123456789)
selections = [
null_selector.get_users_unif_rand(
num_total_users=len(users), users_per_round=1
)[0]
for _ in range(10000)
]
counts = Counter(selections)
assertTrue(min(counts.values()) > 1500 and max(counts.values()) < 2500)
# Make sure that the users aren't being selected sequentially
assertTrue(min(counts.values()) < max(counts.values()) - 2)
with assertRaises(AssertionError):
null_selector.get_user_indices()
uniform_selector_1 = instantiate(
SequentialActiveUserSelectorConfig(user_selector_seed=12345)
)
uniform_selector_2 = instantiate(
SequentialActiveUserSelectorConfig(user_selector_seed=12345)
)
selection_1 = uniform_selector_1.get_users_unif_rand(
num_total_users=10000, users_per_round=1
)
selection_2 = uniform_selector_2.get_users_unif_rand(
num_total_users=10000, users_per_round=1
)
assertEqual(selection_1, selection_2)
def test_uniformly_random_user_selector(self) -> None:
null_selector = instantiate(UniformlyRandomActiveUserSelectorConfig())
assertIsInstance(null_selector, UniformlyRandomActiveUserSelector)
users = tuple(range(5))
torch.manual_seed(123456789)
selections = [
null_selector.get_user_indices(
num_total_users=len(users), users_per_round=1
)[0]
for _ in range(10000)
]
counts = Counter(selections)
assertTrue(min(counts.values()) > 1500 and max(counts.values()) < 2500)
with assertRaises(AssertionError):
null_selector.get_user_indices()
uniform_selector_1 = instantiate(
UniformlyRandomActiveUserSelectorConfig(user_selector_seed=12345)
)
uniform_selector_2 = instantiate(
UniformlyRandomActiveUserSelectorConfig(user_selector_seed=12345)
)
selection_1 = uniform_selector_1.get_user_indices(
num_total_users=10000, users_per_round=1
)
selection_2 = uniform_selector_2.get_user_indices(
num_total_users=10000, users_per_round=1
)
assertEqual(selection_1, selection_2)
def test_sequential_user_selector(self) -> None:
# 1) test if num_users is not divisible by users_per_round
num_total_users, users_per_round, round_num = 10, 3, 8
selector = instantiate(SequentialActiveUserSelectorConfig())
assertIsInstance(selector, SequentialActiveUserSelector)
for round_index in range(round_num):
user_indices = selector.get_user_indices(
num_total_users=num_total_users, users_per_round=users_per_round
)
if round_index % 4 == 0:
assertEqual(user_indices, [0, 1, 2])
if round_index % 4 == 1:
assertEqual(user_indices, [3, 4, 5])
if round_index % 4 == 2:
assertEqual(user_indices, [6, 7, 8])
if round_index % 4 == 3:
assertEqual(user_indices, [9])
# 2) test if num_users is divisible by users_per_round
num_total_users, round_num = 9, 6
selector = instantiate(SequentialActiveUserSelectorConfig())
for round_index in range(round_num):
user_indices = selector.get_user_indices(
num_total_users=num_total_users, users_per_round=users_per_round
)
if round_index % 3 == 0:
assertEqual(user_indices, [0, 1, 2])
if round_index % 3 == 1:
assertEqual(user_indices, [3, 4, 5])
if round_index % 3 == 2:
assertEqual(user_indices, [6, 7, 8])
def test_random_round_robin_user_selector(self) -> None:
# 1) test if num_users is not divisible by users_per_round
num_total_users, users_per_round = 87, 15
available_users = range(num_total_users)
selector = instantiate(RandomRoundRobinActiveUserSelectorConfig())
assertIsInstance(selector, RandomRoundRobinActiveUserSelector)
while len(available_users) > 0:
user_indices = selector.get_user_indices(
num_total_users=num_total_users, users_per_round=users_per_round
)
user_indices_set = set(user_indices)
available_user_set = set(available_users)
assertEqual(len(user_indices), min(users_per_round, len(available_users)))
assertEqual(len(user_indices_set), len(user_indices))
assertEqual(len(available_user_set), len(available_users))
assertTrue(user_indices_set.issubset(available_user_set))
available_users = list(available_user_set - user_indices_set)
# Try one more time to make sure that the user selector class reset
user_indices = selector.get_user_indices(
num_total_users=num_total_users, users_per_round=users_per_round
)
user_indices_set = set(user_indices)
available_user_set = set(range(num_total_users))
assertEqual(len(user_indices), users_per_round)
assertEqual(len(user_indices_set), users_per_round)
assertTrue(user_indices_set.issubset(available_user_set))
# 2) test if num_users is divisible by users_per_round
num_total_users, users_per_round = 60, 15
selector = instantiate(RandomRoundRobinActiveUserSelectorConfig())
while len(available_users) > 0:
user_indices = selector.get_user_indices(
num_total_users=num_total_users, users_per_round=users_per_round
)
user_indices_set = set(user_indices)
available_user_set = set(available_users)
assertEqual(len(user_indices), users_per_round)
assertEqual(len(user_indices_set), len(user_indices))
assertEqual(len(available_user_set), len(available_users))
assertTrue(user_indices_set.issubset(available_user_set))
available_users = list(available_user_set - user_indices_set)
# Try one more time to make sure that the user selector class reset
user_indices = selector.get_user_indices(
num_total_users=num_total_users, users_per_round=users_per_round
)
user_indices_set = set(user_indices)
available_user_set = set(range(num_total_users))
assertEqual(len(user_indices), users_per_round)
assertEqual(len(user_indices_set), users_per_round)
assertTrue(user_indices_set.issubset(available_user_set))
def test_number_of_samples_user_selector(self) -> None:
selector = instantiate(NumberOfSamplesActiveUserSelectorConfig())
assertIsInstance(selector, NumberOfSamplesActiveUserSelector)
shard_size = 5
local_batch_size = 4
dummy_dataset = DummyAlphabetDataset(num_rows=6)
dummy_model = DummyAlphabetFLModel()
data_provider, _ = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, dummy_model
)
selections = [
selector.get_user_indices(users_per_round=1, data_provider=data_provider)[0]
for _ in range(1000)
]
counts = Counter(selections)
assertTrue(counts[0] > counts[1])
def test_high_loss_user_selector(self) -> None:
selector = instantiate(HighLossActiveUserSelectorConfig())
assertIsInstance(selector, HighLossActiveUserSelector)
selector = instantiate(
HighLossActiveUserSelectorConfig(softmax_temperature=0.01)
)
shard_size = 10
local_batch_size = 4
dummy_dataset = DummyAlphabetDataset(num_rows=11)
dummy_model = DummyAlphabetFLModel()
data_provider, _ = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, dummy_model
)
selections = [
selector.get_user_indices(
num_total_users=2,
users_per_round=1,
data_provider=data_provider,
global_model=dummy_model,
epoch=0,
)[0]
for i in range(1000)
]
counts = Counter(selections)
assertTrue(counts[0] > counts[1])
selector = instantiate(
HighLossActiveUserSelectorConfig(
softmax_temperature=0.01, epochs_before_active=1
)
)
selections = [
selector.get_user_indices(
num_total_users=2,
users_per_round=1,
data_provider=data_provider,
global_model=dummy_model,
epoch=0,
)[0]
for i in range(1000)
]
counts = Counter(selections)
assertTrue(counts[0] > 400)
assertTrue(counts[0] < 600)
class TestActiveUserSelectorUtils:
tolerance = 1e-5
def test_convert_to_probability(self) -> None:
valuations = torch.tensor([1, 1, 1, 2, 2], dtype=torch.float)
weights = torch.tensor([1, 1, 1, 1, 1], dtype=torch.float)
total_prob = sum(
ActiveUserSelectorUtils.convert_to_probability(valuations, 0, 1)
)
assertEqual(total_prob, 1)
prob_1 = ActiveUserSelectorUtils.convert_to_probability(
valuations, 0, 1, weights
)
prob_2 = ActiveUserSelectorUtils.convert_to_probability(valuations, 0, 1)
assertTrue(torch.allclose(prob_1, prob_2, rtol=self.tolerance))
prob_1 = ActiveUserSelectorUtils.convert_to_probability(valuations, 0, 1)
prob_2 = torch.exp(valuations) / sum(torch.exp(valuations))
assertTrue(torch.allclose(prob_1, prob_2, rtol=self.tolerance))
weights = torch.tensor([1, 2, 1, 2, 1], dtype=torch.float)
unnormalized_probs = torch.tensor(
[math.exp(1), 2 * math.exp(1), math.exp(1), 2 * math.exp(2), math.exp(2)],
dtype=torch.float,
)
prob_1 = unnormalized_probs / sum(unnormalized_probs)
prob_2 = ActiveUserSelectorUtils.convert_to_probability(
valuations, 0, 1, weights
)
assertTrue(torch.allclose(prob_1, prob_2, rtol=self.tolerance))
prob_1 = ActiveUserSelectorUtils.convert_to_probability(valuations, 0, 0)
prob_2 = torch.tensor([0.2] * 5, dtype=torch.float)
assertTrue(torch.allclose(prob_1, prob_2, rtol=self.tolerance))
prob_1 = ActiveUserSelectorUtils.convert_to_probability(valuations, 0, 25)
prob_2 = torch.tensor([0, 0, 0, 0.5, 0.5], dtype=torch.float)
assertTrue(torch.allclose(prob_1, prob_2, rtol=self.tolerance))
prob = ActiveUserSelectorUtils.convert_to_probability(valuations, 0.5, 1)
assertEqual(len(torch.nonzero(prob)), 3)
with assertRaises(AssertionError):
ActiveUserSelectorUtils.convert_to_probability(valuations, 1, 1)
def test_normalize_by_sample_count(self) -> None:
user_utility = torch.tensor([0, 1, 2, 3], dtype=torch.float)
counts = torch.tensor([1, 10, 100, 1], dtype=torch.float)
no_normalization = ActiveUserSelectorUtils.normalize_by_sample_count(
user_utility, counts, 0
)
assertTrue(torch.allclose(no_normalization, user_utility, rtol=self.tolerance))
avged = user_utility / counts
avg_normalization = ActiveUserSelectorUtils.normalize_by_sample_count(
user_utility, counts, 1
)
assertTrue(torch.allclose(avg_normalization, avged, rtol=self.tolerance))
def test_samples_per_user(self) -> None:
shard_size = 4
local_batch_size = 4
dummy_dataset = DummyAlphabetDataset()
dummy_model = DummyAlphabetFLModel()
data_provider, _ = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, dummy_model
)
samples_per_user = ActiveUserSelectorUtils.samples_per_user(data_provider)
assertTrue(
torch.allclose(
samples_per_user, torch.tensor([4, 4, 4, 4, 4, 4, 2], dtype=torch.float)
)
)
def test_select_users(self) -> None:
"""select_users has two mechanisms for selecting users: p proportion are
selected according to the probabilities in probs, and (1-p) proportion are
selected uniformly at random, where p = fraction_uniformly_random. This test
ensures that the correct number of users are returned for different values
of p.
"""
probs = torch.tensor([0.1] * 10, dtype=torch.float)
users_per_round = 3
rng = torch.Generator()
assertEqual(
len(ActiveUserSelectorUtils.select_users(users_per_round, probs, 0, rng)), 3
)
assertEqual(
len(ActiveUserSelectorUtils.select_users(users_per_round, probs, 1, rng)), 3
)
assertEqual(
len(ActiveUserSelectorUtils.select_users(users_per_round, probs, 0.5, rng)),
3,
)
assertEqual(
sorted(
ActiveUserSelectorUtils.select_users(
2, torch.tensor([0.5, 0.5, 0, 0, 0], dtype=torch.float), 0, rng
)
),
[0, 1],
)
def test_sample_available_users(self) -> None:
num_total_users, users_per_round = 95, 10
available_users = range(num_total_users)
rng = torch.Generator()
while len(available_users) > 0:
prev_available_users = available_users
user_indices = ActiveUserSelectorUtils.sample_available_users(
users_per_round,
# pyre-fixme[6]: Expected `List[int]` for 2nd param but got
# `Union[typing.List[int], range]`.
available_users,
rng,
)
user_indices_set = set(user_indices)
available_user_set = set(available_users)
assertEqual(len(user_indices), min(users_per_round, len(available_users)))
assertEqual(available_users, prev_available_users)
assertTrue(user_indices_set.issubset(available_user_set))
assertEqual(len(user_indices_set), len(user_indices))
assertEqual(len(available_user_set), len(available_users))
available_users = list(available_user_set - user_indices_set)
| canife-main | FLSim/flsim/active_user_selectors/tests/test_active_user_selector.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import Counter
import torch
from flsim.active_user_selectors.diverse_user_selector import (
DiverseUserSelectorUtils,
DiversityMaximizingUserSelector,
DiversityMaximizingUserSelectorConfig,
DiversityStatisticsReportingUserSelector,
DiversityStatisticsReportingUserSelectorConfig,
)
from flsim.common.diversity_metrics import DiversityMetricType
from flsim.common.pytest_helper import assertEqual, assertIsInstance, assertTrue
from flsim.utils.sample_model import DummyAlphabetFLModel, LinearFLModel
from flsim.utils.tests.helpers.test_data_utils import (
DummyAlphabetDataset,
NonOverlappingDataset,
RandomDataset,
)
from hydra.utils import instantiate
class TestDiverseUserSelector:
# Since diversity statistics reporting selector chooses uniformly randomly, replicate the tests
# for that class. select_diverse_cohort is tested separately.
def test_diversity_statistics_reporting_user_selector(self) -> None:
null_selector = instantiate(
DiversityStatisticsReportingUserSelectorConfig(num_candidate_cohorts=2)
)
assertIsInstance(null_selector, DiversityStatisticsReportingUserSelector)
# This user selector class requires a global model and data provider.
users = tuple(range(5))
shard_size, local_batch_size = 4, 4
dummy_dataset = DummyAlphabetDataset()
dummy_model = DummyAlphabetFLModel()
data_provider, _ = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, dummy_model
)
uniform_selector_1 = instantiate(
DiversityStatisticsReportingUserSelectorConfig(
user_selector_seed=12345, num_candidate_cohorts=2
)
)
uniform_selector_2 = instantiate(
DiversityStatisticsReportingUserSelectorConfig(
user_selector_seed=12345, num_candidate_cohorts=2
)
)
# Repeat, since it can't be done with many users as test_uniformly_random_user_selector
num_trials = 25
for _ in range(num_trials):
selection_1 = uniform_selector_1.get_user_indices(
num_total_users=len(users),
users_per_round=1,
data_provider=data_provider,
global_model=dummy_model,
epoch=0,
)
selection_2 = uniform_selector_2.get_user_indices(
num_total_users=len(users),
users_per_round=1,
data_provider=data_provider,
global_model=dummy_model,
epoch=0,
)
assertEqual(selection_1, selection_2)
def test_diversity_maximizing_user_selector(self) -> None:
# Check that the selector actually chooses the cohort with maximum GD.
# For cohort size 3 out of 4 users, there are 4 possible cohorts.
# With 100 candidate cohorts we will try all 4 with very high probability.
num_total_users, users_per_round = 4, 3
num_data_per_user, dim_data = 6, 40
shard_size, local_batch_size = 6, 6
num_candidates = 100
available_users = range(num_total_users)
null_selector = instantiate(
DiversityMaximizingUserSelectorConfig(num_candidate_cohorts=num_candidates)
)
assertIsInstance(null_selector, DiversityMaximizingUserSelector)
linear_model = LinearFLModel(D_in=dim_data, D_out=1)
random_dataset = RandomDataset(
num_users=num_total_users,
num_data_per_user=num_data_per_user,
dim_data=dim_data,
)
data_provider, _ = RandomDataset.create_data_provider_and_loader(
random_dataset, shard_size, local_batch_size, linear_model
)
diverse_cohort = null_selector.get_user_indices(
num_total_users=num_total_users,
users_per_round=users_per_round,
data_provider=data_provider,
global_model=linear_model,
epoch=0,
)
max_diversity = DiverseUserSelectorUtils.calculate_diversity_metrics(
data_provider=data_provider,
global_model=linear_model,
user_indices=diverse_cohort,
)
available_users_set = set(available_users)
# Enumerate through each possible cohort, show it doesn't have greater Diversity
for i in available_users:
cohort = list(available_users_set.difference({i}))
cohort_diversity = DiverseUserSelectorUtils.calculate_diversity_metrics(
data_provider=data_provider,
global_model=linear_model,
user_indices=cohort,
)
assertTrue(cohort_diversity <= max_diversity)
# Test epochs_before_active as is done in test_high_loss_user_selector()
selector = instantiate(
DiversityMaximizingUserSelectorConfig(epochs_before_active=1)
)
shard_size, local_batch_size = 10, 4
dummy_dataset = DummyAlphabetDataset(num_rows=11)
dummy_model = DummyAlphabetFLModel()
data_provider, _ = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, dummy_model
)
selections = [
selector.get_user_indices(
num_total_users=2,
users_per_round=1,
data_provider=data_provider,
global_model=dummy_model,
epoch=0,
)[0]
for i in range(1000)
]
counts = Counter(selections)
assertTrue(counts[0] > 400)
assertTrue(counts[0] < 600)
def test_user_selector_diversity_metric_type(self) -> None:
# Test that the created user selector uses the specified diversity metric
metric_type_strings = [
"gradient_diversity",
"orthogonality",
"delta_norm_sq",
"sum_client_delta_norm_sq",
"sum_client_delta_mutual_angles",
]
metric_types = [
DiversityMetricType.gradient_diversity,
DiversityMetricType.orthogonality,
DiversityMetricType.delta_norm_sq,
DiversityMetricType.sum_client_delta_norm_sq,
DiversityMetricType.sum_client_delta_mutual_angles,
]
for metric_str, metric_type in zip(metric_type_strings, metric_types):
user_selector = instantiate(
DiversityMaximizingUserSelectorConfig(diversity_metric_type=metric_str)
)
assertEqual(user_selector.cfg.diversity_metric_type, metric_type)
class TestDiverseUserSelectorUtils:
def test_calculate_diversity_metrics(self) -> None:
# We construct nonoverlapping data for each user so that the gradient diversity will be 1
num_total_users, users_per_round = 10, 3
num_nonzeros_per_user, num_data_per_user = 4, 6
shard_size, local_batch_size = 6, 6
linear_model = LinearFLModel(
D_in=num_total_users * num_nonzeros_per_user, D_out=1
)
nonoverlap_dataset = NonOverlappingDataset(
num_users=num_total_users,
num_nonzeros_per_user=num_nonzeros_per_user,
num_data_per_user=num_data_per_user,
)
data_provider, _ = NonOverlappingDataset.create_data_provider_and_loader(
nonoverlap_dataset, shard_size, local_batch_size, linear_model
)
# Check GD for all users then for random cohorts; ensure GD=1 for each
available_users = range(num_total_users)
rng = torch.Generator()
diversity_metrics = DiverseUserSelectorUtils.calculate_diversity_metrics(
data_provider,
linear_model,
# pyre-fixme[6]: Expected `List[int]` for 3rd param but got `range`.
available_users,
)
assertTrue(
math.isclose(diversity_metrics.gradient_diversity, 1.0, rel_tol=1e-04)
)
while len(available_users) > 0:
selected_indices = torch.multinomial(
torch.ones(len(available_users), dtype=torch.float),
min(users_per_round, len(available_users)),
replacement=False,
generator=rng,
).tolist()
user_indices = [available_users[idx] for idx in selected_indices]
available_users = [
idx for idx in available_users if idx not in user_indices
]
diversity_metrics = DiverseUserSelectorUtils.calculate_diversity_metrics(
data_provider, linear_model, user_indices
)
assertTrue(
math.isclose(diversity_metrics.gradient_diversity, 1.0, rel_tol=1e-04)
)
# By the definition of GD, it will always be at least 1/num_users.
num_total_users, users_per_round = 10, 3
num_data_per_user, dim_data = 6, 40
shard_size, local_batch_size = 6, 6
linear_model = LinearFLModel(
D_in=num_total_users * num_nonzeros_per_user, D_out=1
)
random_dataset = RandomDataset(
num_users=num_total_users,
num_data_per_user=num_data_per_user,
dim_data=dim_data,
)
data_provider, _ = RandomDataset.create_data_provider_and_loader(
random_dataset, shard_size, local_batch_size, linear_model
)
# Check GD for all users then for random cohorts; ensure GD>=1/num_users for each
available_users = range(num_total_users)
rng = torch.Generator()
diversity_metrics = DiverseUserSelectorUtils.calculate_diversity_metrics(
data_provider,
linear_model,
# pyre-fixme[6]: Expected `List[int]` for 3rd param but got `range`.
available_users,
)
assertTrue(diversity_metrics.gradient_diversity >= 1.0 / num_total_users)
while len(available_users) > 0:
selected_indices = torch.multinomial(
torch.ones(len(available_users), dtype=torch.float),
min(users_per_round, len(available_users)),
replacement=False,
generator=rng,
).tolist()
user_indices = [available_users[idx] for idx in selected_indices]
diversity_metrics = DiverseUserSelectorUtils.calculate_diversity_metrics(
data_provider, linear_model, user_indices
)
assertTrue(
diversity_metrics.gradient_diversity
>= 1.0 / min(num_total_users, len(available_users))
)
available_users = [
idx for idx in available_users if idx not in user_indices
]
def test_select_diverse_cohort(self) -> None:
        # Use a dataset in which the gradient diversity is 1 for any cohort,
        # and check that the max, avg, and min GD all equal 1
num_total_users, users_per_round = 10, 3
num_nonzeros_per_user, num_data_per_user = 4, 6
shard_size, local_batch_size = 6, 6
num_candidate_cohorts = 5
loss_reduction_type = "mean"
client_gradient_scaling = "sum"
diversity_metric_type = DiversityMetricType.gradient_diversity
maximize_metric = True
rel_tol = 1e-04
available_users = range(num_total_users)
rng = torch.Generator()
linear_model = LinearFLModel(
D_in=num_total_users * num_nonzeros_per_user, D_out=1
)
nonoverlap_dataset = NonOverlappingDataset(
num_users=num_total_users,
num_nonzeros_per_user=num_nonzeros_per_user,
num_data_per_user=num_data_per_user,
)
data_provider, _ = NonOverlappingDataset.create_data_provider_and_loader(
nonoverlap_dataset, shard_size, local_batch_size, linear_model
)
(_, diversity_statistics,) = DiverseUserSelectorUtils.select_diverse_cohort(
data_provider=data_provider,
global_model=linear_model,
users_per_round=users_per_round,
# pyre-fixme[6]: Expected `List[int]` for 4th param but got `range`.
available_users=available_users,
rng=rng,
num_search_samples=num_candidate_cohorts,
maximize_metric=maximize_metric,
loss_reduction_type=loss_reduction_type,
client_gradient_scaling=client_gradient_scaling,
diversity_metric_type=diversity_metric_type,
)
assertTrue(
math.isclose(diversity_statistics.maximum_metric, 1.0, rel_tol=rel_tol)
)
assertTrue(
math.isclose(diversity_statistics.average_metric, 1.0, rel_tol=rel_tol)
)
assertTrue(
math.isclose(diversity_statistics.minimum_metric, 1.0, rel_tol=rel_tol)
)
# On random data, check diversity statistics for all users then for random cohorts. Ensure that
# there is separation between the max, average, and min when not all users are in the cohort.
num_total_users, users_per_round = 10, 3
num_data_per_user, dim_data = 6, 40
shard_size, local_batch_size = 6, 6
num_candidate_cohorts = 5
loss_reduction_type = "mean"
client_gradient_scaling = "sum"
diversity_metric_type = DiversityMetricType.gradient_diversity
maximize_metric = True
available_users = range(num_total_users)
linear_model = LinearFLModel(D_in=dim_data, D_out=1)
random_dataset = RandomDataset(
num_users=num_total_users,
num_data_per_user=num_data_per_user,
dim_data=dim_data,
)
data_provider, _ = RandomDataset.create_data_provider_and_loader(
random_dataset, shard_size, local_batch_size, linear_model
)
rng = torch.Generator()
(
diverse_cohort,
diversity_statistics,
) = DiverseUserSelectorUtils.select_diverse_cohort(
data_provider=data_provider,
global_model=linear_model,
users_per_round=num_total_users,
# pyre-fixme[6]: Expected `List[int]` for 4th param but got `range`.
available_users=available_users,
rng=rng,
num_search_samples=num_candidate_cohorts,
maximize_metric=maximize_metric,
loss_reduction_type=loss_reduction_type,
client_gradient_scaling=client_gradient_scaling,
diversity_metric_type=diversity_metric_type,
)
assertEqual(set(diverse_cohort), set(available_users))
assertTrue(
math.isclose(
diversity_statistics.maximum_metric,
diversity_statistics.average_metric,
rel_tol=rel_tol,
)
)
assertTrue(
math.isclose(
diversity_statistics.average_metric,
diversity_statistics.minimum_metric,
rel_tol=rel_tol,
)
)
while len(available_users) > 0:
(
diverse_cohort,
diversity_statistics,
) = DiverseUserSelectorUtils.select_diverse_cohort(
data_provider=data_provider,
global_model=linear_model,
users_per_round=users_per_round,
# pyre-fixme[6]: Expected `List[int]` for 4th param but got
# `Union[typing.List[int], range]`.
available_users=available_users,
rng=rng,
num_search_samples=num_candidate_cohorts,
maximize_metric=maximize_metric,
loss_reduction_type=loss_reduction_type,
client_gradient_scaling=client_gradient_scaling,
diversity_metric_type=diversity_metric_type,
)
# Check that the chosen cohort is valid
assertTrue(set(diverse_cohort) <= set(available_users))
assertTrue(
diversity_statistics.minimum_metric
>= 1.0 / min(num_total_users, len(available_users))
)
# The gradient diversity should be equal to the maximum in the statistics
diverse_cohort_diversity = (
DiverseUserSelectorUtils.calculate_diversity_metrics(
data_provider=data_provider,
global_model=linear_model,
user_indices=diverse_cohort,
loss_reduction_type=loss_reduction_type,
client_gradient_scaling=client_gradient_scaling,
diversity_metric_type=diversity_metric_type,
)
)
assertTrue(
math.isclose(
diverse_cohort_diversity.gradient_diversity,
diversity_statistics.maximum_metric,
rel_tol=rel_tol,
)
)
if len(available_users) > users_per_round:
assertEqual(len(diverse_cohort), users_per_round)
assertTrue(
diversity_statistics.maximum_metric
> diversity_statistics.average_metric
)
assertTrue(
diversity_statistics.average_metric
> diversity_statistics.minimum_metric
)
else:
assertEqual(len(diverse_cohort), len(available_users))
assertTrue(
math.isclose(
diversity_statistics.maximum_metric,
diversity_statistics.average_metric,
rel_tol=rel_tol,
)
)
assertTrue(
math.isclose(
diversity_statistics.average_metric,
diversity_statistics.minimum_metric,
rel_tol=rel_tol,
)
)
available_users = [
idx for idx in available_users if idx not in diverse_cohort
]
# Try this again with minimizing the diversity metric
num_total_users, users_per_round = 10, 3
num_data_per_user, dim_data = 6, 40
shard_size, local_batch_size = 6, 6
num_candidate_cohorts = 5
loss_reduction_type = "mean"
client_gradient_scaling = "sum"
diversity_metric_type = DiversityMetricType.gradient_diversity
maximize_metric = False
available_users = range(num_total_users)
linear_model = LinearFLModel(D_in=dim_data, D_out=1)
random_dataset = RandomDataset(
num_users=num_total_users,
num_data_per_user=num_data_per_user,
dim_data=dim_data,
)
data_provider, _ = RandomDataset.create_data_provider_and_loader(
random_dataset, shard_size, local_batch_size, linear_model
)
rng = torch.Generator()
(
diverse_cohort,
diversity_statistics,
) = DiverseUserSelectorUtils.select_diverse_cohort(
data_provider=data_provider,
global_model=linear_model,
users_per_round=num_total_users,
# pyre-fixme[6]: Expected `List[int]` for 4th param but got `range`.
available_users=available_users,
rng=rng,
num_search_samples=num_candidate_cohorts,
maximize_metric=maximize_metric,
loss_reduction_type=loss_reduction_type,
client_gradient_scaling=client_gradient_scaling,
diversity_metric_type=diversity_metric_type,
)
assertEqual(set(diverse_cohort), set(available_users))
assertTrue(
math.isclose(
diversity_statistics.maximum_metric,
diversity_statistics.average_metric,
rel_tol=rel_tol,
)
)
assertTrue(
math.isclose(
diversity_statistics.average_metric,
diversity_statistics.minimum_metric,
rel_tol=rel_tol,
)
)
while len(available_users) > 0:
(
diverse_cohort,
diversity_statistics,
) = DiverseUserSelectorUtils.select_diverse_cohort(
data_provider=data_provider,
global_model=linear_model,
users_per_round=users_per_round,
# pyre-fixme[6]: Expected `List[int]` for 4th param but got
# `Union[typing.List[int], range]`.
available_users=available_users,
rng=rng,
num_search_samples=num_candidate_cohorts,
maximize_metric=maximize_metric,
loss_reduction_type=loss_reduction_type,
client_gradient_scaling=client_gradient_scaling,
diversity_metric_type=diversity_metric_type,
)
# Check that the chosen cohort is valid
assertTrue(set(diverse_cohort) <= set(available_users))
assertTrue(
diversity_statistics.minimum_metric
>= 1.0 / min(num_total_users, len(available_users))
)
# The gradient diversity should be equal to the minimum in the statistics
diverse_cohort_diversity = (
DiverseUserSelectorUtils.calculate_diversity_metrics(
data_provider=data_provider,
global_model=linear_model,
user_indices=diverse_cohort,
loss_reduction_type=loss_reduction_type,
client_gradient_scaling=client_gradient_scaling,
diversity_metric_type=diversity_metric_type,
)
)
assertTrue(
math.isclose(
diverse_cohort_diversity.gradient_diversity,
diversity_statistics.minimum_metric,
rel_tol=rel_tol,
)
)
if len(available_users) > users_per_round:
assertEqual(len(diverse_cohort), users_per_round)
assertTrue(
diversity_statistics.maximum_metric
> diversity_statistics.average_metric
)
assertTrue(
diversity_statistics.average_metric
> diversity_statistics.minimum_metric
)
else:
assertEqual(len(diverse_cohort), len(available_users))
assertTrue(
math.isclose(
diversity_statistics.maximum_metric,
diversity_statistics.average_metric,
rel_tol=rel_tol,
)
)
assertTrue(
math.isclose(
diversity_statistics.average_metric,
diversity_statistics.minimum_metric,
rel_tol=rel_tol,
)
)
available_users = [
idx for idx in available_users if idx not in diverse_cohort
]
| canife-main | FLSim/flsim/active_user_selectors/tests/test_diverse_user_selector.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore
from .data_sharder import (
BroadcastSharderConfig,
ColumnSharderConfig,
PowerLawSharderConfig,
RandomSharderConfig,
RoundRobinSharderConfig,
SequentialSharderConfig,
)
ConfigStore.instance().store(
name="base_random_sharder",
node=RandomSharderConfig,
group="sharder",
)
ConfigStore.instance().store(
name="base_sequential_sharder",
node=SequentialSharderConfig,
group="sharder",
)
ConfigStore.instance().store(
name="base_round_robin_sharder",
node=RoundRobinSharderConfig,
group="sharder",
)
ConfigStore.instance().store(
name="base_column_sharder",
node=ColumnSharderConfig,
group="sharder",
)
ConfigStore.instance().store(
name="base_broadcast_sharder",
node=BroadcastSharderConfig,
group="sharder",
)
ConfigStore.instance().store(
name="base_power_law_sharder",
node=PowerLawSharderConfig,
group="sharder",
)
| canife-main | FLSim/flsim/data/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import random
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Tuple, TypeVar
import numpy as np
import torch
from flsim.utils.config_utils import fullclassname, init_self_cfg
from omegaconf import MISSING
from torch.utils.data import Dataset
Shardable = TypeVar("Shardable", Iterable[Dict[str, Any]], Dataset)
class FLDataSharder(abc.ABC):
"""This class takes in a file, and partitions it into a list of datasets.
It supports random partitioning, broadcasting,
round-robining and sharding by a column
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=FLDataSharderConfig,
**kwargs,
)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@abc.abstractmethod
def shard_for_row(self, csv_row: Dict[Any, Any]) -> List[Any]:
"""
Determine which shard a row should belong to.
"""
pass
def shard_rows(self, data_rows: Shardable) -> Iterable[Tuple[str, Any]]:
"""Partition a set of rows into multiple sets using a sharding strategy.
Args:
data_rows: Iterable[Dict[str, Any]]]: iterable over dictionary mapping column
name to value.
"""
shards = defaultdict(list)
# pyre-fixme[16]: `Dataset` has no attribute `__iter__`.
for one_row in data_rows:
for shard_id in self.shard_for_row(one_row):
shards[str(shard_id)].append(one_row)
return shards.items()
class RandomSharder(FLDataSharder):
"""Splits training data randomly. num_shards should be specified.
Assigns first num_shards rows to different shards to avoid empty shards."""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=RandomSharderConfig,
**kwargs,
)
super().__init__(**kwargs)
self._assignments: List[int] = list(range(self.cfg.num_shards))
random.shuffle(self._assignments)
def shard_for_row(self, csv_row: Dict[Any, Any]) -> List[int]:
if self._assignments:
return [self._assignments.pop(0)]
# pyre-fixme[16]: `RandomSharder` has no attribute `cfg`.
return [random.randint(0, self.cfg.num_shards - 1)]
def shard_rows(self, data_rows: Shardable) -> Iterable[Tuple[str, Any]]:
# sanity check to avoid empty shards
shards = super().shard_rows(data_rows)
assert (
sum(1 for _shard in shards)
# pyre-fixme[16]: `RandomSharder` has no attribute `cfg`.
>= self.cfg.num_shards
), "number of rows must be at least the number of shards"
return shards
class SequentialSharder(FLDataSharder):
"""Assign first N rows to shard A, the next N rows to shard B, and so
    on. Mostly used for testing purposes (e.g. a sanity check against
conventional SGD training based on PyTorch Dataset and its batching
mechanism).
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=SequentialSharderConfig,
**kwargs,
)
super().__init__(**kwargs)
self.filled_in_current_bucket = 0
self.index = 0
def shard_for_row(self, csv_row: Dict[Any, Any]) -> List[int]:
# pyre-fixme[16]: `SequentialSharder` has no attribute `cfg`.
if self.filled_in_current_bucket == self.cfg.examples_per_shard:
self.index += 1
self.filled_in_current_bucket = 1
else:
self.filled_in_current_bucket += 1
return [self.index]
class BroadcastSharder(FLDataSharder):
"""Copy each training datum to all shards. num_shards should be specified"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=BroadcastSharderConfig,
**kwargs,
)
super().__init__(**kwargs)
def shard_for_row(self, csv_row: Dict[Any, Any]) -> List[int]:
# pyre-fixme[16]: `BroadcastSharder` has no attribute `cfg`.
return list(range(self.cfg.num_shards))
class ColumnSharder(FLDataSharder):
"""Specify a column name used to shard.
It should be the last column in the file,
    and sharding_col must be specified.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=ColumnSharderConfig,
**kwargs,
)
super().__init__(**kwargs)
def shard_for_row(self, csv_row: Dict[Any, Any]) -> List[Any]:
# pyre-fixme[16]: `ColumnSharder` has no attribute `cfg`.
unwrapped_colindex = self.cfg.sharding_col
if unwrapped_colindex.isdigit():
unwrapped_colindex = int(unwrapped_colindex)
        assert unwrapped_colindex < len(
            csv_row
        ), f"Sharding index out of bounds: {unwrapped_colindex}"
shard_idx = csv_row[unwrapped_colindex]
# shard_idx can be a 0-dim tensor when a table column is an integer.
if isinstance(shard_idx, torch.Tensor):
shard_idx = shard_idx.item()
return [shard_idx]
class RoundRobinSharder(FLDataSharder):
"""Splits training in a round-robin fashion between all shards"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=RoundRobinSharderConfig,
**kwargs,
)
super().__init__(**kwargs)
self._last_shard: int = -1
def shard_for_row(self, csv_row: Dict[Any, Any]) -> List[int]:
# pyre-fixme[16]: `RoundRobinSharder` has no attribute `cfg`.
self._last_shard = (self._last_shard + 1) % self.cfg.num_shards
return [self._last_shard]
def shard_rows(self, data_rows: Shardable) -> Iterable[Tuple[str, Any]]:
# sanity check to avoid empty shards
shards = super().shard_rows(data_rows)
assert (
sum(1 for _shard in shards)
# pyre-fixme[16]: `RoundRobinSharder` has no attribute `cfg`.
>= self.cfg.num_shards
), "number of rows must be at least the number of shards"
return shards
class PowerLawSharder(FLDataSharder):
"""
    Splits training data based on a power law distribution with exponent
    alpha, where alpha is in (0.0, 1.0] to ensure a right-skewed distribution.
    This strategy shards in a round-robin fashion at first so that every shard
    gets at least one example. After that, it shards following the power law
    distribution.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=PowerLawSharderConfig,
**kwargs,
)
super().__init__(**kwargs)
assert 0.0 < self.cfg.alpha <= 1.0, "alpha must be in the interval (0, 1]"
self._last_shard: int = -1
self._weights = np.random.power(self.cfg.alpha, self.cfg.num_shards)
# normalize to sum to 1.0
self._weights = self._weights / sum(self._weights)
self._choices = np.arange(0, self.cfg.num_shards)
def shard_for_row(self, csv_row: Dict[Any, Any]) -> List[int]:
# pyre-fixme[16]: `PowerLawSharder` has no attribute `cfg`.
if self._last_shard < self.cfg.num_shards - 1:
self._last_shard += 1
return [self._last_shard]
else:
return [np.random.choice(self._choices, p=self._weights)]
def shard_rows(self, data_rows: Shardable) -> Iterable[Tuple[str, Any]]:
# sanity check to avoid empty shards
shards = super().shard_rows(data_rows)
assert (
sum(1 for _shard in shards)
# pyre-fixme[16]: `PowerLawSharder` has no attribute `cfg`.
>= self.cfg.num_shards
), "number of rows must be at least the number of shards"
return shards
@dataclass
class FLDataSharderConfig:
_target_: str = MISSING
_recursive_: bool = False
@dataclass
class RandomSharderConfig(FLDataSharderConfig):
_target_: str = fullclassname(RandomSharder)
num_shards: int = MISSING
@dataclass
class SequentialSharderConfig(FLDataSharderConfig):
_target_: str = fullclassname(SequentialSharder)
examples_per_shard: int = MISSING
@dataclass
class BroadcastSharderConfig(FLDataSharderConfig):
_target_: str = fullclassname(BroadcastSharder)
num_shards: int = MISSING
@dataclass
class ColumnSharderConfig(FLDataSharderConfig):
_target_: str = fullclassname(ColumnSharder)
sharding_col: str = MISSING
@dataclass
class RoundRobinSharderConfig(FLDataSharderConfig):
_target_: str = fullclassname(RoundRobinSharder)
num_shards: int = MISSING
@dataclass
class PowerLawSharderConfig(FLDataSharderConfig):
_target_: str = fullclassname(PowerLawSharder)
num_shards: int = MISSING
alpha: float = MISSING
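# Illustrative usage sketch (appended for clarity; not part of the original
# module). It builds a RandomSharder from its Hydra config, exactly as the
# unit tests do, and shards a small synthetic list of dict rows. The row
# contents here are made up purely for illustration.
if __name__ == "__main__":
    from hydra.utils import instantiate

    example_rows = [{"user_id": i, "label": i % 2} for i in range(20)]
    example_sharder = instantiate(RandomSharderConfig(num_shards=5))
    for shard_id, rows in example_sharder.shard_rows(example_rows):
        print(f"shard {shard_id}: {len(rows)} rows")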
| canife-main | FLSim/flsim/data/data_sharder.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Any, Dict
# see https://fb.workplace.com/groups/fbcode/permalink/2440457449324413/
# @manual=third-party//pandas:pandas-py
import pandas as pd
from flsim.interfaces.dataset import FLDataset
class FLCSVDataset(FLDataset):
"""Abstract class that can be easily extended by implementing one light
method (_get_processed_row_from_single_raw_row()) that is based on
PyTorch's Dataset class. For example,
```
class AdsCVRDataset(FLCSVDataset):
def _get_processed_row_from_single_raw_row(self, raw_row: Any):
return {
"float_features": torch.Tensor(featurize(raw_row["feat"])),
"label": torch.Tensor(raw_row["label"]),
}
```
then, you can simply enumerate over this Dataset as you would usually
with PyTorch Dataset to iterate over training/eval/test batches.
However, note that you will most likely be encapsulating this within
a concrete implementation of IFLDataLoader to use this within FL
simulation with minimal effort (e.g. see FLAdsDataLoader).
"""
def __init__(self, path: str):
self.data_frame = pd.read_csv(path)
def __len__(self):
return len(self.data_frame)
def __getitem__(self, idx):
raw_row = self.data_frame.iloc[idx]
return self._get_processed_row_from_single_raw_row(raw_row)
@abc.abstractmethod
def _get_processed_row_from_single_raw_row(self, raw_row: Any) -> Dict[str, Any]:
"""This method should be overridden by the child class. One should
provide logic to convert a raw row read from data into a processed
row (i.e. dictionary where key is column name and value is whatever
model expects such as tensor for features and integer for labels).
"""
pass
| canife-main | FLSim/flsim/data/csv_dataset.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Iterable, Iterator, List, Optional
from flsim.interfaces.model import IFLModel
class IFLUserData(ABC):
"""
Wraps data for a single user
IFLUserData is responsible for
1. Keeping track of the number of examples for a particular user
2. Keeping track of the number of batches for a particular user
3. Providing an iterator over all the user batches
"""
def num_total_examples(self) -> int:
"""
Returns the number of examples
"""
return self.num_train_examples() + self.num_eval_examples()
def num_total_batches(self) -> int:
"""
Returns the number of batches
"""
return self.num_train_batches() + self.num_eval_batches()
@abstractmethod
def num_train_examples(self) -> int:
"""
Returns the number of train examples
"""
@abstractmethod
def num_eval_examples(self) -> int:
"""
Returns the number of eval examples
"""
@abstractmethod
def num_train_batches(self) -> int:
"""
Returns the number of training batches
"""
@abstractmethod
def num_eval_batches(self) -> int:
"""
Returns the number of eval batches
"""
@abstractmethod
def train_data(self) -> Iterator[Any]:
"""
Returns the training batches
"""
@abstractmethod
def eval_data(self) -> Iterator[Any]:
"""
Returns the eval batches
"""
class IFLDataProvider(ABC):
"""
Provides data to the trainer
IFLDataProvider is responsible for
1. Enforcing a uniform interface that trainer expects
2. Transforming data into what IFLModel.fl_forward() is going to consume
3. Keeping track of the sharded client data
"""
@abstractmethod
def train_user_ids(self) -> List[int]:
"""
Returns a list of user ids in the data set
"""
@abstractmethod
def num_train_users(self) -> int:
"""
Returns the number of users in train set
"""
@abstractmethod
def get_train_user(self, user_index: int) -> IFLUserData:
"""
Returns train user from user_index
"""
@abstractmethod
def train_users(self) -> Iterable[IFLUserData]:
"""
Returns training users iterable
"""
@abstractmethod
def eval_users(self) -> Iterable[IFLUserData]:
"""
Returns evaluation users iterable
"""
@abstractmethod
def test_users(self) -> Iterable[IFLUserData]:
"""
Returns test users iterable
"""
class FLUserDataFromList(IFLUserData):
"""
Util class to create an IFLUserData from a list of user batches
"""
def __init__(
self, data: Iterable, model: IFLModel, eval_batches: Optional[Iterable] = None
):
self.data = data
self._num_examples: int = 0
self._num_batches: int = 0
self.model = model
self.training_batches = []
self.eval_batches = eval_batches if eval_batches is not None else []
for batch in self.data:
training_batch = self.model.fl_create_training_batch(batch=batch)
self.training_batches.append(training_batch)
self._num_examples += model.get_num_examples(training_batch)
self._num_batches += 1
def train_data(self):
for batch in self.training_batches:
yield batch
def eval_data(self):
for batch in self.eval_batches:
yield self.model.fl_create_training_batch(batch=batch)
def num_batches(self):
return self._num_batches
def num_train_examples(self):
return self._num_examples
def num_eval_batches(self):
return 0
def num_train_batches(self):
return self._num_batches
def num_eval_examples(self):
return 0
class FLDataProviderFromList(IFLDataProvider):
r"""
Util class to help ease the transition to IFLDataProvider
Args:
train_user_list: (Iterable[Iterable[Any]]): train data
eval_user_list: (Iterable[Iterable[Any]]): eval data
test_user_list (Iterable[Iterable[Any]]): test data
model: (IFLModel): the IFLModel to create training batch for
"""
def __init__(
self,
train_user_list: Iterable[Iterable[Any]],
eval_user_list: Iterable[Iterable[Any]],
test_user_list: Iterable[Iterable[Any]],
model: IFLModel,
):
self.train_user_list = train_user_list
self.eval_user_list = eval_user_list
self.test_user_list = test_user_list
self.model = model
self._train_users = {
user_id: FLUserDataFromList(
data=user_data, eval_batches=user_data, model=model
)
for user_id, user_data in enumerate(train_user_list)
}
self._eval_users = {
user_id: FLUserDataFromList(data=[], eval_batches=user_data, model=model)
for user_id, user_data in enumerate(eval_user_list)
}
self._test_users = {
user_id: FLUserDataFromList(data=[], eval_batches=user_data, model=model)
for user_id, user_data in enumerate(test_user_list)
}
def train_user_ids(self):
return list(self._train_users.keys())
def num_train_users(self):
return len(self.train_user_ids())
def get_train_user(self, user_index: int):
if user_index in self._train_users:
return self._train_users[user_index]
else:
raise IndexError(f"Index {user_index} not in {self.train_user_ids()}")
def train_users(self):
return list(self._train_users.values())
def eval_users(self):
return list(self._eval_users.values())
def test_users(self):
return list(self._test_users.values())
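# Illustrative sketch (comment-only; not part of the original module).
# FLDataProviderFromList wraps per-user lists of raw batches. `model` is
# assumed to be any IFLModel whose fl_create_training_batch() accepts the raw
# batches below; `user0_batches` etc. are placeholder names.
#
#   provider = FLDataProviderFromList(
#       train_user_list=[user0_batches, user1_batches],
#       eval_user_list=[eval_batches],
#       test_user_list=[test_batches],
#       model=model,
#   )
#   provider.num_train_users()                      # -> 2
#   provider.get_train_user(0).num_train_batches()  # batches for user 0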
| canife-main | FLSim/flsim/data/data_provider.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Iterable, Optional, Type
import torch
from flsim.data.data_sharder import FLDataSharder
from flsim.interfaces.data_loader import IFLDataLoader
from flsim.interfaces.dataset import FLDataset
from torch.utils.data import Dataset
class FLDatasetDataLoader(IFLDataLoader):
def __init__(
self,
dataset_class: Type[FLDataset],
train_path: str,
test_path: str,
eval_path: str,
sharder: FLDataSharder,
):
# pyre-fixme[19]: Expected 0 positional arguments.
self.train_dataset = dataset_class(train_path)
# pyre-fixme[19]: Expected 0 positional arguments.
self.test_dataset = dataset_class(test_path)
# pyre-fixme[19]: Expected 0 positional arguments.
self.eval_dataset = dataset_class(eval_path)
self.sharder = sharder
def fl_train_set(self, **kwargs) -> Iterable[Iterable[Any]]:
train_batches = []
for sharded_rows in self.sharder.shard_rows(self.train_dataset):
train_batches.append(sharded_rows)
return train_batches
def fl_eval_set(self, **kwargs) -> Iterable[Any]:
return self.eval_dataset
def fl_test_set(self, **kwargs) -> Iterable[Any]:
return self.test_dataset
class FLDatasetDataLoaderWithBatch(IFLDataLoader):
def __init__(
self,
train_dataset: Dataset,
test_dataset: Dataset,
eval_dataset: Dataset,
sharder: FLDataSharder,
train_batch_size: int,
eval_batch_size: Optional[int] = None,
test_batch_size: Optional[int] = None,
):
assert train_batch_size > 0, "Batch size should be a positive integer."
self.train_dataset = train_dataset
self.test_dataset = test_dataset
self.eval_dataset = eval_dataset
self.sharder = sharder
self.train_batch_size = train_batch_size
self.eval_batch_size = eval_batch_size
self.test_batch_size = test_batch_size
self._num_total_users: int = -1
@property
def num_total_users(self):
assert (
self._num_total_users != -1
), "num_total_users is valid only after fl_train_set() has been called"
return self._num_total_users
def fl_train_set(self, **kwargs) -> Iterable[Iterable[Any]]:
self._num_total_users = 0
rank = kwargs.get("rank", 0)
world_size = kwargs.get("world_size", 1)
final_train_batches, self._num_total_users = self._create_batches_from_dataset(
self.train_dataset, rank=rank, world_size=world_size
)
return final_train_batches
def fl_eval_set(self, **kwargs) -> Iterable[Any]:
collate_fn = kwargs.get("collate_fn", None)
if collate_fn is not None:
raise ValueError("collate fn is not used")
final_eval_batches, _ = self._create_batches_from_dataset(
self.eval_dataset, rank=0, world_size=1
)
return final_eval_batches
def fl_test_set(self, **kwargs) -> Iterable[Any]:
collate_fn = kwargs.get("collate_fn", None)
if collate_fn is not None:
raise ValueError("collate fn is not used")
final_test_batches, _ = self._create_batches_from_dataset(
self.test_dataset, rank=0, world_size=1
)
return final_test_batches
def _create_batches_from_dataset(
self, dataset: Dataset, rank: int, world_size: int
):
train_batches = [user_data for _, user_data in self.sharder.shard_rows(dataset)]
# batch train_batches collected above
final_train_batches = []
num_total_users = 0
# fetch attributes for each row
keys = list(train_batches[0][0].keys())
for one_user_data in train_batches:
batched_user_data = []
for i, single_data in enumerate(one_user_data):
if i % self.train_batch_size == 0:
batched_user_data.append([])
batched_user_data[-1].append(single_data)
new_batched_user_data = []
for a_batched_user_data in batched_user_data:
batched_data_rows = {}
for key in keys:
batched_data_rows[key] = []
for single_user_data in a_batched_user_data:
for key in keys:
batched_data_rows[key].append(single_user_data[key])
for key in keys:
batched_data_rows[key] = torch.stack(batched_data_rows[key])
new_batched_user_data.append(batched_data_rows)
# divide the total number of users evenly into world_size # of workers
if num_total_users % world_size == rank:
final_train_batches.append(new_batched_user_data)
# count the total number of users
num_total_users += 1
return final_train_batches, num_total_users
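# Illustrative usage sketch (appended for clarity; not part of the original
# module). It feeds a tiny in-memory "dataset" of tensor rows through
# FLDatasetDataLoaderWithBatch, sharding sequentially so that every 4 rows
# become one user, mirroring how the unit tests drive this loader.
if __name__ == "__main__":
    from hydra.utils import instantiate

    from flsim.data.data_sharder import SequentialSharderConfig

    rows = [
        {"x": torch.tensor([float(i)]), "y": torch.tensor([i % 2])}
        for i in range(8)
    ]
    loader = FLDatasetDataLoaderWithBatch(
        train_dataset=rows,
        test_dataset=rows,
        eval_dataset=rows,
        sharder=instantiate(SequentialSharderConfig(examples_per_shard=4)),
        train_batch_size=2,
        eval_batch_size=2,
        test_batch_size=2,
    )
    users = list(loader.fl_train_set())
    print(f"{loader.num_total_users} users, {len(users[0])} batches each")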
| canife-main | FLSim/flsim/data/dataset_data_loader.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import pkg_resources
import pytest
import torch
from flsim.common.pytest_helper import assertEqual
from flsim.data.csv_dataset import FLCSVDataset
from flsim.data.data_sharder import ColumnSharderConfig
from flsim.data.dataset_data_loader import FLDatasetDataLoaderWithBatch
from hydra.utils import instantiate
@pytest.fixture(scope="class")
def prepare_dataset_data_loader_with_batch(request) -> None:
request.cls.test_csv_path = "test_resources/data.csv"
request.cls.total_data_count = 15
request.cls.train_batch_size = 1
request.cls.eval_batch_size = 1
request.cls.test_batch_size = 1
class TestDataset(FLCSVDataset):
def _get_processed_row_from_single_raw_row(self, raw_row: Any) -> Dict[str, Any]:
return {
"userid": torch.Tensor([raw_row["userid"]]),
"label": torch.Tensor([raw_row["label"]]),
}
@pytest.mark.usefixtures("prepare_dataset_data_loader_with_batch")
class TestDatasetDataLoaderWithBatch:
def test_batch_size(self) -> None:
# pyre-ignore[16]: for pytest fixture
file_path = pkg_resources.resource_filename(__name__, self.test_csv_path)
dataset = TestDataset(file_path)
fl_data_sharder = instantiate(ColumnSharderConfig(sharding_col="userid"))
data_loader = FLDatasetDataLoaderWithBatch(
dataset,
dataset,
dataset,
fl_data_sharder,
# pyre-ignore[16]: for pytest fixture
self.train_batch_size,
# pyre-ignore[16]: for pytest fixture
self.eval_batch_size,
# pyre-ignore[6]
self.test_batch_size,
)
assertEqual(
len(list(data_loader.fl_train_set())),
# pyre-ignore[16]: for pytest fixture
self.total_data_count / self.train_batch_size,
)
assertEqual(
len(list(data_loader.fl_eval_set())),
self.total_data_count / self.eval_batch_size,
)
assertEqual(
len(list(data_loader.fl_test_set())),
self.total_data_count / self.test_batch_size,
)
| canife-main | FLSim/flsim/data/tests/test_dataset_dataloader_with_batch.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/data/tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import random
import string
import torch
from flsim.common.pytest_helper import assertEqual, assertTrue
from flsim.data.data_sharder import (
BroadcastSharderConfig,
ColumnSharderConfig,
PowerLawSharderConfig,
RandomSharderConfig,
RoundRobinSharderConfig,
SequentialSharderConfig,
)
from flsim.data.dataset_data_loader import FLDatasetDataLoaderWithBatch
from flsim.utils.sample_model import TestDataSetting
from flsim.utils.tests.helpers.test_data_utils import DummyAlphabetDataset, Utils
from hydra.utils import instantiate
class MockData:
@staticmethod
def provide_data():
"""Generate 26 rows of data. Each row:
"label":0 or 1, "text":int between 0 and 25
"user_id": between 'a' and 'z'
"""
characters = Utils.get_characters(num=26)
return [
{
TestDataSetting.LABEL_COL_NAME: Utils.get_label(character),
TestDataSetting.TEXT_COL_NAME: Utils.get_text(character),
TestDataSetting.USER_ID_COL_NAME: torch.tensor(
ord(character), dtype=torch.int8
),
}
for character in characters
]
class TestDataSharder:
def test_random_sharder(self) -> None:
"""Only tests random sharding strategy in this test case. All the other
strategies are tested in test_shard_rows().
"""
random.seed(1)
random_sharder = instantiate(
RandomSharderConfig(num_shards=TestDataSetting.NUM_SHARDS)
)
for i in range(random_sharder.cfg.num_shards + 1):
shard = random_sharder.shard_for_row(
MockData.provide_data()[i % sum(1 for row in MockData.provide_data())]
)
if i == 0:
assertEqual(shard, [6])
elif i == random_sharder.cfg.num_shards:
assertEqual(shard, [3])
def test_shard_rows_random(self) -> None:
random_sharder = instantiate(
RandomSharderConfig(num_shards=TestDataSetting.NUM_SHARDS)
)
random_sharded_rows = list(
random_sharder.shard_rows(
MockData.provide_data()[
: min(
int(random_sharder.cfg.num_shards * 1.2 + 1),
sum(1 for row in MockData.provide_data()),
)
]
)
)
assertEqual(len(random_sharded_rows), random_sharder.cfg.num_shards)
def test_shard_rows_power_law(self) -> None:
power_law_sharder = instantiate(PowerLawSharderConfig(num_shards=2, alpha=0.2))
power_law_sharded_rows = list(
power_law_sharder.shard_rows(MockData.provide_data())
)
num_examples_per_user = [len(p) for _, p in power_law_sharded_rows]
assertEqual(len(num_examples_per_user), power_law_sharder.cfg.num_shards)
# assert that top user will have more than 20% of the examples
assertTrue(max(num_examples_per_user) / sum(num_examples_per_user) > 0.20)
def test_shard_rows_broadcast(self) -> None:
broadcast_sharder = instantiate(
BroadcastSharderConfig(num_shards=TestDataSetting.NUM_SHARDS)
)
# all rows from dataset should be replicated to all shards
assertEqual(
[
user_data
for _, user_data in broadcast_sharder.shard_rows(
MockData.provide_data()
)
],
[
MockData.provide_data()
for shard_idx in range(TestDataSetting.NUM_SHARDS)
],
)
def test_shard_rows_column(self) -> None:
column_sharder = instantiate(
ColumnSharderConfig(sharding_col=TestDataSetting.USER_ID_COL_NAME)
)
# each data row should be assigned to a unique shard, since column
# sharding is based on user id here and each mocked user id is unique
assertEqual(
[
user_data
for _, user_data in column_sharder.shard_rows(MockData.provide_data())
],
[[one_data_row] for one_data_row in MockData.provide_data()],
)
def test_shard_rows_round_robin(self) -> None:
round_robin_sharder = instantiate(
RoundRobinSharderConfig(num_shards=TestDataSetting.NUM_SHARDS)
)
sharded_rows_from_round_robin = [
user_data
for _, user_data in round_robin_sharder.shard_rows(MockData.provide_data())
]
# there are 26 data rows here and it should be sharded with round-
# robin fashion with 10 shards. e.g. 1th, 11th, 21th data row
# should be in the same shard.
for shard_index in range(round_robin_sharder.cfg.num_shards):
assertEqual(
sharded_rows_from_round_robin[shard_index],
[
one_data_row
for row_index, one_data_row in enumerate(MockData.provide_data())
if row_index % round_robin_sharder.cfg.num_shards == shard_index
],
)
def test_shard_rows_sequential(self) -> None:
sequential_sharder = instantiate(SequentialSharderConfig(examples_per_shard=2))
sharded_rows_from_sequential = [
user_data
for _, user_data in sequential_sharder.shard_rows(MockData.provide_data())
]
assertEqual(
len(sharded_rows_from_sequential),
len(string.ascii_lowercase) / sequential_sharder.cfg.examples_per_shard,
)
dataset = MockData.provide_data()
for shard_index, data_for_a_shard in enumerate(sharded_rows_from_sequential):
start_index = shard_index * sequential_sharder.cfg.examples_per_shard
assertEqual(
data_for_a_shard,
dataset[
start_index : start_index
+ sequential_sharder.cfg.examples_per_shard
],
)
def test_distributed_user_sharding(self) -> None:
# mock 26 rows
shard_size = 4
local_batch_size = 2
world_size = 4
# mock world_size parallel creation of fl_train_set
for rank in range(world_size):
fl_data_sharder = instantiate(
SequentialSharderConfig(examples_per_shard=shard_size)
)
data_loader = FLDatasetDataLoaderWithBatch(
MockData.provide_data(),
MockData.provide_data(),
MockData.provide_data(),
fl_data_sharder,
local_batch_size, # train_batch_size
local_batch_size, # eval_batch_size
local_batch_size, # test_batch_size
)
train_set = list(data_loader.fl_train_set(rank=rank, world_size=world_size))
assertEqual(data_loader.num_total_users, math.ceil(26 / shard_size))
number_of_users_on_worker = 2 if rank in (0, 1, 2) else 1
train_set_size = len(train_set)
assertEqual(train_set_size, number_of_users_on_worker)
def test_pytorch_dataset_wrapper(self) -> None:
# mock 26 rows
shard_size = 4
local_batch_size = 2
fl_data_sharder = instantiate(
SequentialSharderConfig(examples_per_shard=shard_size)
)
dummy_dataset = DummyAlphabetDataset()
data_loader = FLDatasetDataLoaderWithBatch(
dummy_dataset,
dummy_dataset,
dummy_dataset,
fl_data_sharder,
local_batch_size, # train_batch_size
local_batch_size, # eval_batch_size
local_batch_size, # test_batch_size
)
data_loader.fl_train_set()
assertEqual(data_loader.num_total_users, math.ceil(26 / shard_size))
eval_set = list(data_loader.fl_eval_set())
assertEqual(len(eval_set), math.ceil(26 / shard_size))
| canife-main | FLSim/flsim/data/tests/test_data_sharder.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from enum import IntEnum
import torch
import torch.nn as nn
from flsim.utils.distributed.fl_distributed import FLDistributedUtils, OperationType
from flsim.utils.fl.common import FLModelParamUtils
class AggregationType(IntEnum):
"""
Type of averaging for the aggregator
"""
AVERAGE = 0
SUM = 1
WEIGHTED_AVERAGE = 2
WEIGHTED_SUM = 3
class Aggregator:
"""
Util class to handle aggregation logic such as
    {weighted, unweighted} summation and {weighted, unweighted} averaging.
    Please do not extend this class.
"""
def __init__(
self,
module: nn.Module,
aggregation_type: AggregationType,
only_federated_params: bool = True,
):
self._buffer_module: nn.Module = FLModelParamUtils.clone(module)
self.device = next(self._buffer_module.parameters()).device
self._sum_weights: torch.Tensor = torch.zeros(1, device=self.device)
self.only_federated_params = only_federated_params
FLModelParamUtils.zero_weights(
self._buffer_module, only_federated_params=self.only_federated_params
)
self.aggregation_type = aggregation_type
def zero_weights(self):
FLModelParamUtils.zero_weights(
self._buffer_module, only_federated_params=self.only_federated_params
)
self._sum_weights = torch.zeros(1, device=self.device)
def add_update(self, delta: nn.Module, weight: float):
weight = weight if self._is_weighted else 1.0
FLModelParamUtils.add_model(delta, self._buffer_module, self._buffer_module)
self._sum_weights += weight
def apply_weight_to_update(self, delta: nn.Module, weight: float):
weight = weight if self._is_weighted else 1.0
FLModelParamUtils.multiply_model_by_weight(
model=delta,
weight=weight,
model_to_save=delta,
)
def aggregate(
self, distributed_op: OperationType = OperationType.SUM_AND_BROADCAST
) -> nn.Module:
FLDistributedUtils.synchronize_model_across_workers(
operation=distributed_op,
model=self._buffer_module,
weights=self._sum_weights,
)
if self._is_averaged and self.sum_weights.item() != 0:
FLModelParamUtils.multiply_model_by_weight(
model=self._buffer_module,
weight=1.0 / self.sum_weights.item(),
model_to_save=self._buffer_module,
)
return self._buffer_module
@property
def sum_weights(self) -> torch.Tensor:
return self._sum_weights
@property
def _is_weighted(self) -> bool:
return self.aggregation_type in [
AggregationType.WEIGHTED_AVERAGE,
AggregationType.WEIGHTED_SUM,
]
@property
def _is_averaged(self) -> bool:
return self.aggregation_type in [
AggregationType.WEIGHTED_AVERAGE,
AggregationType.AVERAGE,
]
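# Illustrative usage sketch (appended for clarity; not part of the original
# module). It runs the weighted-average flow on a tiny linear module: apply
# the weight to each delta, accumulate it, then aggregate. This assumes a
# single worker, where the distributed synchronization inside aggregate()
# reduces to a no-op.
if __name__ == "__main__":
    base = nn.Linear(2, 1)
    aggregator = Aggregator(
        module=base, aggregation_type=AggregationType.WEIGHTED_AVERAGE
    )
    aggregator.zero_weights()
    for weight in (1.0, 3.0):
        # For illustration, every "delta" is just a copy of the base module.
        delta = FLModelParamUtils.clone(base)
        aggregator.apply_weight_to_update(delta=delta, weight=weight)
        aggregator.add_update(delta=delta, weight=weight)
    averaged = aggregator.aggregate()
    # With identical deltas, the weighted average equals the delta itself.
    print(aggregator.sum_weights.item(), next(averaged.parameters()).shape)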
| canife-main | FLSim/flsim/servers/aggregator.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
import torch
from flsim.active_user_selectors.simple_user_selector import (
UniformlyRandomActiveUserSelectorConfig,
)
from flsim.channels.base_channel import IdentityChannel, IFLChannel
from flsim.channels.message import Message
from flsim.channels.pq_utils.pq import PQ
from flsim.channels.product_quantization_channel import ProductQuantizationChannel
from flsim.data.data_provider import IFLDataProvider
from flsim.interfaces.model import IFLModel
from flsim.optimizers.server_optimizers import FedAvgOptimizerConfig
from flsim.privacy.common import PrivacyBudget, PrivacySetting
from flsim.privacy.privacy_engine import IPrivacyEngine
from flsim.privacy.privacy_engine_factory import NoiseType, PrivacyEngineFactory
from flsim.privacy.user_update_clip import UserUpdateClipper
from flsim.servers.aggregator import AggregationType, Aggregator
from flsim.servers.sync_servers import ISyncServer, SyncServerConfig
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.distributed.fl_distributed import FLDistributedUtils, OperationType
from flsim.utils.fl.common import FLModelParamUtils
from hydra.utils import instantiate
from omegaconf import OmegaConf
class SyncDPSGDServer(ISyncServer):
"""
User level DP-SGD Server implementing https://arxiv.org/abs/1710.06963
Args:
global_model: IFLModel: Global (server model) to be updated between rounds
users_per_round: int: User per round to calculate sampling rate
num_total_users: int: Total users in the dataset to calculate sampling rate
Sampling rate = users_per_round / num_total_users
channel: Optional[IFLChannel]: Communication channel between server and clients
"""
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IFLChannel] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=SyncDPSGDServerConfig,
**kwargs,
)
assert (
self.cfg.aggregation_type == AggregationType.AVERAGE # pyre-ignore[16]
), "DP training must be done with simple averaging and uniform weights."
self.privacy_budget = PrivacyBudget()
self._clipping_value = self.cfg.privacy_setting.clipping_value
self._optimizer = instantiate(
config=self.cfg.server_optimizer,
model=global_model.fl_get_module(),
)
self._global_model: IFLModel = global_model
self._user_update_clipper: UserUpdateClipper = UserUpdateClipper()
self._aggregator: Aggregator = Aggregator(
module=global_model.fl_get_module(),
aggregation_type=self.cfg.aggregation_type,
only_federated_params=self.cfg.only_federated_params,
)
self._privacy_engine: Optional[IPrivacyEngine] = None
self._active_user_selector = instantiate(self.cfg.active_user_selector)
self._channel: IFLChannel = channel or IdentityChannel()
self._clip_factors = [] # TODO: Canary modification
@classmethod
def _set_defaults_in_cfg(cls, cfg):
if OmegaConf.is_missing(cfg.active_user_selector, "_target_"):
cfg.active_user_selector = UniformlyRandomActiveUserSelectorConfig()
if OmegaConf.is_missing(cfg.server_optimizer, "_target_"):
cfg.server_optimizer = FedAvgOptimizerConfig()
@property
def global_model(self):
return self._global_model
def select_clients_for_training(
self,
num_total_users,
users_per_round,
data_provider: Optional[IFLDataProvider] = None,
epoch: Optional[int] = None,
):
if self._privacy_engine is None:
self._privacy_engine: IPrivacyEngine = PrivacyEngineFactory.create(
# pyre-ignore[16]
self.cfg.privacy_setting,
users_per_round,
num_total_users,
noise_type=NoiseType.GAUSSIAN,
)
self._privacy_engine.attach(self._global_model.fl_get_module())
return self._active_user_selector.get_user_indices(
num_total_users=num_total_users,
users_per_round=users_per_round,
data_provider=data_provider,
global_model=self.global_model,
epoch=epoch,
)
def init_round(self):
self._aggregator.zero_weights()
self._optimizer.zero_grad()
self._privacy_engine.attach(self._global_model.fl_get_module())
def _init_and_boradcast_qparams(self, client_delta):
if isinstance(self._channel, ProductQuantizationChannel):
seed_centroids = {}
state_dict = client_delta.fl_get_module().state_dict()
for name, param in state_dict.items():
if (
param.ndim > 1
and param.numel() >= self._channel.cfg.min_numel_to_quantize
):
pq = PQ(
param.data.size(),
self._channel.cfg.max_block_size,
self._channel.cfg.num_codebooks,
self._channel.cfg.max_num_centroids,
self._channel.cfg.num_k_means_iter,
self._channel.cfg.verbose,
)
centroids, _ = pq.encode(param.data.cpu())
seed_centroids[name] = centroids
self._channel.seed_centroids = seed_centroids
# TODO: Canary modification, for debugging
def _output_model(self, model, message="", scale=False):
if scale:
print(f"{message} {torch.cat([p.detach().flatten()*(1.0/0.8991111469452855)*4 for p in model.parameters()])}")
else:
print(f"{message} {torch.cat([p.flatten() for p in model.parameters()])}")
def receive_update_from_client(self, message: Message, enable_clipping=True):
message = self._channel.client_to_server(message)
# self._output_model(message.model.fl_get_module(), "Model update receieved")
self._aggregator.apply_weight_to_update(
delta=message.model.fl_get_module(), weight=message.weight
)
# self._output_model(message.model.fl_get_module(), "Model update applied weight")
# TODO: Needed to scale canary grads
clip_factor = self._user_update_clipper.clip(
message.model.fl_get_module(), max_norm=self._clipping_value, enable_clipping=enable_clipping
)
# self._output_model(message.model.fl_get_module(), "Model update clipped")
# self._output_model(message.model.fl_get_module(), "Model update clipped and rescaled", scale=True)
self._clip_factors.append(clip_factor) # TODO: Canary modification
self._aggregator.add_update(
delta=message.model.fl_get_module(), weight=message.weight
)
def step(self):
assert self._privacy_engine is not None, "PrivacyEngine is not initialized"
aggregated_model = self._aggregator.aggregate(distributed_op=OperationType.SUM)
if FLDistributedUtils.is_master_worker():
self._privacy_engine.add_noise(
aggregated_model,
self._clipping_value / self._aggregator.sum_weights.item(),
)
FLDistributedUtils.synchronize_model_across_workers(
operation=OperationType.BROADCAST,
model=aggregated_model,
weights=self._aggregator.sum_weights,
)
FLModelParamUtils.set_gradient(
model=self._global_model.fl_get_module(),
reference_gradient=aggregated_model,
)
self._optimizer.step()
self.privacy_budget = self._privacy_engine.get_privacy_spent()
return aggregated_model # TODO: Canary modification.
@dataclass
class SyncDPSGDServerConfig(SyncServerConfig):
_target_: str = fullclassname(SyncDPSGDServer)
aggregation_type: AggregationType = AggregationType.AVERAGE
privacy_setting: PrivacySetting = PrivacySetting()
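# Illustrative config sketch (comment-only; not part of the original module).
# A DP server is typically built from its config; clipping_value is the field
# this file reads directly, and the remaining noise-related fields of
# PrivacySetting are defined in flsim.privacy.common. `global_model` is
# assumed to be any IFLModel.
#
#   server = instantiate(
#       SyncDPSGDServerConfig(
#           privacy_setting=PrivacySetting(clipping_value=1.0),
#       ),
#       global_model=global_model,
#   )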
| canife-main | FLSim/flsim/servers/sync_dp_servers.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore # @manual
from .sync_dp_servers import SyncDPSGDServerConfig
from .sync_secagg_servers import SyncSecAggServerConfig
from .sync_servers import SyncServerConfig
ConfigStore.instance().store(
name="base_sync_server",
node=SyncServerConfig,
group="server",
)
ConfigStore.instance().store(
name="base_sync_dp_server",
node=SyncDPSGDServerConfig,
group="server",
)
ConfigStore.instance().store(
name="base_sync_secagg_server",
node=SyncSecAggServerConfig,
group="server",
)
| canife-main | FLSim/flsim/servers/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
from dataclasses import dataclass
from typing import List, Optional
from flsim.active_user_selectors.simple_user_selector import (
ActiveUserSelectorConfig,
UniformlyRandomActiveUserSelectorConfig,
)
from flsim.channels.base_channel import IdentityChannel, IFLChannel
from flsim.channels.message import Message
from flsim.channels.pq_utils.pq import PQ
from flsim.channels.product_quantization_channel import ProductQuantizationChannel
from flsim.data.data_provider import IFLDataProvider
from flsim.interfaces.model import IFLModel
from flsim.optimizers.server_optimizers import (
FedAvgOptimizerConfig,
OptimizerType,
ServerOptimizerConfig,
)
from flsim.servers.aggregator import AggregationType, Aggregator
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.fl.common import FLModelParamUtils
from hydra.utils import instantiate
from omegaconf import OmegaConf
class ISyncServer(abc.ABC):
"""
Interface for Sync servers, all sync server should
implement this interface.
Responsibilities:
Wrapper for aggregator and optimizer.
Collects client updates and sends to aggregator.
Changes the global model using aggregator and optimizer.
"""
@abc.abstractmethod
def init_round(self):
"""
Clears the buffer and zero out grad in optimizer
"""
raise NotImplementedError()
@abc.abstractmethod
def receive_update_from_client(self, message: Message):
"""
Receives new update from client
"""
raise NotImplementedError()
@abc.abstractmethod
def step(self):
"""
Update the global model
"""
raise NotImplementedError()
@abc.abstractmethod
def select_clients_for_training(
self,
num_total_users: int,
users_per_round: int,
data_provider: Optional[IFLDataProvider] = None,
epoch: Optional[int] = None,
) -> List[int]:
"""
Selects clients to participate in a round of training.
The selection scheme depends on the underlying selector. This
can include: random, sequential, high loss etc.
Args:
num_total_users ([int]): Number of total users (population size).
users_per_round ([int]]): Number of users per round.
data_provider (Optional[IFLDataProvider], optional): This is useful when the selection scheme
is high loss. Defaults to None.
epoch (Optional[int], optional): [description]. This is useful when the selection scheme
is high loss. Defaults to None.
Returns:
List[int]: A list of client indices
"""
pass
@property
def global_model(self) -> IFLModel:
"""
Returns the current global model
"""
raise NotImplementedError()
class SyncServer(ISyncServer):
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IFLChannel] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=SyncServerConfig,
**kwargs,
)
self._optimizer = OptimizerType.create_optimizer(
# pyre-fixme[16]: `SyncServer` has no attribute `cfg`.
config=self.cfg.server_optimizer,
model=global_model.fl_get_module(),
)
self._global_model: IFLModel = global_model
self._aggregator: Aggregator = Aggregator(
module=global_model.fl_get_module(),
aggregation_type=self.cfg.aggregation_type,
only_federated_params=self.cfg.only_federated_params,
)
self._active_user_selector = instantiate(self.cfg.active_user_selector)
self._channel: IFLChannel = channel or IdentityChannel()
@classmethod
def _set_defaults_in_cfg(cls, cfg):
if OmegaConf.is_missing(cfg.active_user_selector, "_target_"):
cfg.active_user_selector = UniformlyRandomActiveUserSelectorConfig()
if OmegaConf.is_missing(cfg.server_optimizer, "_target_"):
cfg.server_optimizer = FedAvgOptimizerConfig()
@property
def global_model(self):
return self._global_model
def select_clients_for_training(
self,
num_total_users,
users_per_round,
data_provider: Optional[IFLDataProvider] = None,
epoch: Optional[int] = None,
):
return self._active_user_selector.get_user_indices(
num_total_users=num_total_users,
users_per_round=users_per_round,
data_provider=data_provider,
global_model=self.global_model,
epoch=epoch,
)
def _init_and_boradcast_qparams(self, client_delta):
if isinstance(self._channel, ProductQuantizationChannel):
seed_centroids = {}
state_dict = client_delta.fl_get_module().state_dict()
for name, param in state_dict.items():
if (
param.ndim > 1
and param.numel() >= self._channel.cfg.min_numel_to_quantize
):
pq = PQ(
param.data.size(),
self._channel.cfg.max_block_size,
self._channel.cfg.num_codebooks,
self._channel.cfg.max_num_centroids,
self._channel.cfg.num_k_means_iter,
self._channel.cfg.verbose,
)
centroids, _ = pq.encode(param.data.cpu())
seed_centroids[name] = centroids
self._channel.seed_centroids = seed_centroids
def init_round(self):
self._aggregator.zero_weights()
self._optimizer.zero_grad()
def receive_update_from_client(self, message: Message):
message = self._channel.client_to_server(message)
self._aggregator.apply_weight_to_update(
delta=message.model.fl_get_module(), weight=message.weight
)
self._aggregator.add_update(
delta=message.model.fl_get_module(), weight=message.weight
)
def step(self):
aggregated_model = self._aggregator.aggregate()
FLModelParamUtils.set_gradient(
model=self._global_model.fl_get_module(),
reference_gradient=aggregated_model,
)
self._optimizer.step()
@dataclass
class SyncServerConfig:
_target_: str = fullclassname(SyncServer)
_recursive_: bool = False
only_federated_params: bool = True
aggregation_type: AggregationType = AggregationType.WEIGHTED_AVERAGE
server_optimizer: ServerOptimizerConfig = ServerOptimizerConfig()
active_user_selector: ActiveUserSelectorConfig = ActiveUserSelectorConfig()
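# Illustrative round lifecycle (comment-only sketch; not part of the original
# module). `global_model` is assumed to be any IFLModel and `client_deltas` a
# list of (IFLModel, weight) pairs produced by local training.
#
#   server = instantiate(SyncServerConfig(), global_model=global_model)
#   selected = server.select_clients_for_training(
#       num_total_users=100, users_per_round=10
#   )
#   server.init_round()
#   for delta, weight in client_deltas:
#       server.receive_update_from_client(Message(model=delta, weight=weight))
#   server.step()  # aggregated delta applied through the server optimizer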
| canife-main | FLSim/flsim/servers/sync_servers.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional, Tuple
from flsim.active_user_selectors.simple_user_selector import (
ActiveUserSelectorConfig,
UniformlyRandomActiveUserSelectorConfig,
)
from flsim.channels.base_channel import IdentityChannel, IFLChannel
from flsim.channels.message import Message
from flsim.data.data_provider import IFLDataProvider
from flsim.interfaces.model import IFLModel
from flsim.optimizers.server_optimizers import FedAvgOptimizerConfig
from flsim.secure_aggregation.secure_aggregator import (
FixedPointConfig,
SecureAggregator,
utility_config_flatter,
)
from flsim.servers.aggregator import AggregationType, Aggregator
from flsim.servers.sync_servers import ISyncServer, SyncServerConfig
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.fl.common import FLModelParamUtils
from hydra.utils import instantiate
from omegaconf import OmegaConf
class SyncSecAggServer(ISyncServer):
def __init__(
self,
*,
global_model: IFLModel,
channel: Optional[IFLChannel] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=SyncSecAggServerConfig,
**kwargs,
)
self._optimizer = instantiate(
# pyre-fixme[16]: `SyncServer` has no attribute `cfg`.
config=self.cfg.server_optimizer,
model=global_model.fl_get_module(),
)
self._global_model: IFLModel = global_model
self._aggregator: Aggregator = Aggregator(
module=global_model.fl_get_module(),
aggregation_type=self.cfg.aggregation_type,
only_federated_params=self.cfg.only_federated_params,
)
self._secure_aggregator = SecureAggregator(
utility_config_flatter(
global_model.fl_get_module(),
self.cfg.fixedpoint,
)
)
self._active_user_selector = instantiate(self.cfg.active_user_selector)
self._channel: IFLChannel = channel or IdentityChannel()
@classmethod
def _set_defaults_in_cfg(cls, cfg):
if OmegaConf.is_missing(cfg.active_user_selector, "_target_"):
cfg.active_user_selector = UniformlyRandomActiveUserSelectorConfig()
if OmegaConf.is_missing(cfg.server_optimizer, "_target_"):
cfg.server_optimizer = FedAvgOptimizerConfig()
@property
def global_model(self):
return self._global_model
def select_clients_for_training(
self,
num_total_users,
users_per_round,
data_provider: Optional[IFLDataProvider] = None,
epoch: Optional[int] = None,
):
return self._active_user_selector.get_user_indices(
num_total_users=num_total_users,
users_per_round=users_per_round,
data_provider=data_provider,
global_model=self.global_model,
epoch=epoch,
)
def init_round(self):
self._aggregator.zero_weights()
self._optimizer.zero_grad()
def receive_update_from_client(self, message: Message):
message = self._channel.client_to_server(message)
self._aggregator.apply_weight_to_update(
delta=message.model.fl_get_module(), weight=message.weight
)
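        # Secure-aggregation path: convert the weighted delta to fixed point,
        # apply this client's noise mask, accumulate it into the aggregation
        # buffer, and update the overflow bookkeeping on that buffer.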
self._secure_aggregator.params_to_fixedpoint(message.model.fl_get_module())
self._secure_aggregator.apply_noise_mask(
message.model.fl_get_module().named_parameters()
)
self._aggregator.add_update(
delta=message.model.fl_get_module(), weight=message.weight
)
self._secure_aggregator.update_aggr_overflow_and_model(
model=self._aggregator._buffer_module
)
def step(self):
aggregated_model = self._aggregator.aggregate()
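        # Remove the noise masks from the aggregate, convert it back from fixed
        # point to float, and use the result as the gradient for the server
        # optimizer step below.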
self._secure_aggregator.apply_denoise_mask(aggregated_model.named_parameters())
self._secure_aggregator.params_to_float(aggregated_model)
FLModelParamUtils.set_gradient(
model=self._global_model.fl_get_module(),
reference_gradient=aggregated_model,
)
self._optimizer.step()
def calc_avg_overflow_percentage(
self,
num_users: int,
model: IFLModel,
) -> Tuple[float, float]:
return self._secure_aggregator.calc_avg_overflow_percentage(
num_users, model.fl_get_module()
)
@dataclass
class SyncSecAggServerConfig(SyncServerConfig):
"""
Contains configurations for a server with Secure Aggregation
"""
_target_: str = fullclassname(SyncSecAggServer)
aggregation_type: AggregationType = AggregationType.WEIGHTED_AVERAGE
fixedpoint: Optional[FixedPointConfig] = None
active_user_selector: ActiveUserSelectorConfig = ActiveUserSelectorConfig()
| canife-main | FLSim/flsim/servers/sync_secagg_servers.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import List
import numpy as np
import pytest
from flsim.active_user_selectors.simple_user_selector import (
UniformlyRandomActiveUserSelectorConfig,
)
from flsim.channels.base_channel import IdentityChannel
from flsim.channels.half_precision_channel import HalfPrecisionChannel
from flsim.channels.message import Message
from flsim.common.pytest_helper import assertEmpty, assertEqual
from flsim.optimizers.server_optimizers import (
FedAdamOptimizerConfig,
FedAvgOptimizerConfig,
FedAvgWithLROptimizerConfig,
FedLAMBOptimizerConfig,
FedLARSOptimizerConfig,
OptimizerType,
)
from flsim.servers.aggregator import AggregationType
from flsim.servers.sync_servers import SyncServerConfig
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.test_utils import (
SampleNet,
create_model_with_value,
model_parameters_equal_to_value,
verify_models_equivalent_after_training,
)
from hydra.utils import instantiate
@dataclass
class MockClientUpdate:
deltas: List[float]
weights: List[float]
expected_value: float
class TestSyncServer:
def _create_client_updates(self, num_clients, aggregation_type) -> MockClientUpdate:
deltas = [i + 1 for i in range(num_clients)]
weights = [i + 1 for i in range(num_clients)]
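        # e.g. num_clients=3 gives deltas=[1, 2, 3] and weights=[1, 2, 3]:
        # WEIGHTED_AVERAGE -> (1*1 + 2*2 + 3*3) / (1 + 2 + 3) = 14/6,
        # AVERAGE -> 2, WEIGHTED_SUM -> 14, SUM -> 6.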
if aggregation_type == AggregationType.WEIGHTED_AVERAGE:
expected_value = float(np.average(deltas, weights=weights))
elif aggregation_type == AggregationType.AVERAGE:
expected_value = float(np.average(deltas))
elif aggregation_type == AggregationType.WEIGHTED_SUM:
expected_value = float(sum(d * w for d, w in zip(deltas, weights)))
elif aggregation_type == AggregationType.SUM:
expected_value = float(sum(deltas))
# pyre-fixme[6]: Expected `List[float]` for 1st param but got `List[int]`.
# pyre-fixme[6]: Expected `List[float]` for 2nd param but got `List[int]`.
# pyre-fixme[61]: `expected_value` is undefined, or not always defined.
return MockClientUpdate(deltas, weights, expected_value)
def _run_one_round_comparison(
self,
optimizer,
server,
optim_model,
server_model,
client_updates,
):
server.init_round()
optimizer.zero_grad()
for delta, weight in zip(client_updates.deltas, client_updates.weights):
server.receive_update_from_client(
Message(model=SampleNet(create_model_with_value(delta)), weight=weight)
)
FLModelParamUtils.set_gradient(
model=optim_model,
reference_gradient=create_model_with_value(client_updates.expected_value),
)
optimizer.step()
server.step()
return server_model, optim_model
def _compare_optim_and_server(
self, opt_config, num_rounds, num_clients, agg_type
) -> None:
server_model = SampleNet(create_model_with_value(0))
optim_model = create_model_with_value(0)
server = instantiate(
SyncServerConfig(aggregation_type=agg_type, server_optimizer=opt_config),
global_model=server_model,
)
optimizer = OptimizerType.create_optimizer(optim_model, opt_config)
client_updates = self._create_client_updates(
num_clients, aggregation_type=agg_type
)
for _ in range(num_rounds):
server_model, optim_model = self._run_one_round_comparison(
optimizer, server, optim_model, server_model, client_updates
)
error_msg = verify_models_equivalent_after_training(
server_model, optim_model
)
assertEmpty(error_msg, msg=error_msg)
@pytest.mark.parametrize(
"aggregation_type",
[
AggregationType.AVERAGE,
AggregationType.WEIGHTED_AVERAGE,
],
)
@pytest.mark.parametrize(
"num_clients",
[10, 1],
)
@pytest.mark.parametrize(
"num_rounds",
[10, 1],
)
def test_fed_avg_sync_server(
self, aggregation_type, num_clients, num_rounds
) -> None:
server_model = SampleNet(create_model_with_value(0))
server = instantiate(
SyncServerConfig(
aggregation_type=aggregation_type,
server_optimizer=FedAvgOptimizerConfig(),
),
global_model=server_model,
)
client_updates = self._create_client_updates(
num_clients, aggregation_type=aggregation_type
)
for round_num in range(num_rounds):
server.init_round()
for delta, weight in zip(client_updates.deltas, client_updates.weights):
server.receive_update_from_client(
Message(
model=SampleNet(create_model_with_value(delta)), weight=weight
)
)
server.step()
error_msg = model_parameters_equal_to_value(
server_model, -client_updates.expected_value * (round_num + 1)
)
assertEmpty(error_msg, msg=error_msg)
@pytest.mark.parametrize(
"aggregation_type",
[
AggregationType.AVERAGE,
AggregationType.WEIGHTED_AVERAGE,
AggregationType.SUM,
AggregationType.WEIGHTED_SUM,
],
)
@pytest.mark.parametrize(
"num_clients",
[10, 1],
)
@pytest.mark.parametrize(
"num_rounds",
[10, 1],
)
def test_fed_sgd_sync_server(
self, aggregation_type, num_clients, num_rounds
) -> None:
opt_config = FedAvgWithLROptimizerConfig(lr=0.1)
self._compare_optim_and_server(
opt_config,
agg_type=aggregation_type,
num_rounds=num_rounds,
num_clients=num_clients,
)
@pytest.mark.parametrize(
"aggregation_type",
[
AggregationType.AVERAGE,
AggregationType.WEIGHTED_AVERAGE,
AggregationType.SUM,
AggregationType.WEIGHTED_SUM,
],
)
@pytest.mark.parametrize(
"num_clients",
[10, 1],
)
@pytest.mark.parametrize(
"num_rounds",
[10, 1],
)
def test_fed_adam_sync_server(
self, aggregation_type, num_clients, num_rounds
) -> None:
opt_config = FedAdamOptimizerConfig(lr=0.1)
self._compare_optim_and_server(
opt_config,
agg_type=aggregation_type,
num_rounds=num_rounds,
num_clients=num_clients,
)
@pytest.mark.parametrize(
"aggregation_type",
[
AggregationType.AVERAGE,
AggregationType.WEIGHTED_AVERAGE,
AggregationType.SUM,
AggregationType.WEIGHTED_SUM,
],
)
@pytest.mark.parametrize(
"num_clients",
[10, 1],
)
@pytest.mark.parametrize(
"num_rounds",
[10, 1],
)
def test_fed_lars_sync_server(
self, aggregation_type, num_clients, num_rounds
) -> None:
opt_config = FedLARSOptimizerConfig(lr=0.001)
self._compare_optim_and_server(
opt_config,
agg_type=aggregation_type,
num_rounds=num_rounds,
num_clients=num_clients,
)
@pytest.mark.parametrize(
"aggregation_type",
[
AggregationType.AVERAGE,
AggregationType.WEIGHTED_AVERAGE,
AggregationType.SUM,
AggregationType.WEIGHTED_SUM,
],
)
@pytest.mark.parametrize(
"num_clients",
[10, 1],
)
@pytest.mark.parametrize(
"num_rounds",
[10, 1],
)
def test_fed_lamb_sync_server(
self, aggregation_type, num_clients, num_rounds
) -> None:
opt_config = FedLAMBOptimizerConfig(lr=0.001)
self._compare_optim_and_server(
opt_config,
agg_type=aggregation_type,
num_rounds=num_rounds,
num_clients=num_clients,
)
def test_select_clients_for_training(self) -> None:
"""
Selects 10 clients out of 100. SyncServer with seed = 0 should
        return the same indices as the uniform random selector.
"""
selector_config = UniformlyRandomActiveUserSelectorConfig(user_selector_seed=0)
uniform_selector = instantiate(selector_config)
server = instantiate(
SyncServerConfig(active_user_selector=selector_config),
global_model=SampleNet(create_model_with_value(0)),
)
server_selected_indices = server.select_clients_for_training(
num_total_users=100, users_per_round=10
)
uniform_selector_indices = uniform_selector.get_user_indices(
num_total_users=100, users_per_round=10
)
assertEqual(server_selected_indices, uniform_selector_indices)
@pytest.mark.parametrize(
"channel",
[HalfPrecisionChannel(), IdentityChannel()],
)
def test_server_channel_integration(self, channel) -> None:
"""From Client to Server, the channel should quantize and then dequantize the message
therefore there should be no change in the model
"""
server = instantiate(
SyncServerConfig(),
global_model=SampleNet(create_model_with_value(0)),
channel=channel,
)
delta = create_model_with_value(1)
init = FLModelParamUtils.clone(delta)
server.receive_update_from_client(Message(model=SampleNet(delta), weight=1.0))
error_msg = verify_models_equivalent_after_training(delta, init)
assertEmpty(error_msg, msg=error_msg)
| canife-main | FLSim/flsim/servers/tests/test_sync_servers.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/servers/tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from tempfile import mkstemp
import pytest
import torch.distributed as dist
import torch.multiprocessing as mp
from flsim.common.pytest_helper import (
assertAlmostEqual,
assertEmpty,
assertEqual,
assertNotEqual,
)
from flsim.servers.aggregator import AggregationType, Aggregator
from flsim.utils.distributed.fl_distributed import FLDistributedUtils, OperationType
from flsim.utils.test_utils import (
create_model_with_value,
model_parameters_equal_to_value,
)
def init_process(
rank,
world_size,
aggregator,
models,
file_loc,
pipe,
distributed_op,
):
FLDistributedUtils.dist_init(
rank=rank,
world_size=world_size,
init_method=f"file://{file_loc}",
use_cuda=False,
)
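    # Each rank handles a disjoint round-robin slice of the models (model i gets
    # weight i + 1); aggregate() then runs the distributed op so that, with
    # SUM_AND_BROADCAST, every rank reports the same mean parameter value.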
aggregator.zero_weights()
for i, m in enumerate(models):
if i % world_size == rank:
weight = i + 1
aggregator.apply_weight_to_update(delta=m, weight=weight)
aggregator.add_update(m, weight=weight)
module = aggregator.aggregate(distributed_op)
sums, weights = 0.0, 0.0
all_sum = [(p.sum(), p.numel()) for p in module.parameters()]
for s, w in all_sum:
sums += float(s)
weights += float(w)
pipe.send(sums / weights)
dist.destroy_process_group()
def run_multiprocess_aggregation_test(
aggregator,
num_processes=1,
num_models=4,
distributed_op=OperationType.SUM_AND_BROADCAST,
):
_, tmpfile = mkstemp(dir="/tmp")
pipe_out, pipe_in = mp.Pipe(False)
models = [create_model_with_value(1.0) for i in range(num_models)]
processes = []
results = []
FLDistributedUtils.WORLD_SIZE = num_processes
for pid in range(num_processes):
p = mp.Process(
target=init_process,
args=(
pid,
num_processes,
aggregator,
models,
tmpfile,
pipe_in,
distributed_op,
),
)
p.start()
processes.append(p)
results.append(pipe_out)
for p in processes:
p.join()
res = [r.recv() for r in results]
return res
AGGREGATION_TYPES = [
AggregationType.AVERAGE,
AggregationType.WEIGHTED_AVERAGE,
AggregationType.SUM,
AggregationType.WEIGHTED_SUM,
]
class TestAggregator:
def test_zero_weights(self) -> None:
model = create_model_with_value(0)
ag = Aggregator(module=model, aggregation_type=AggregationType.AVERAGE)
weight = 1.0
steps = 5
for _ in range(steps):
delta = create_model_with_value(1.0)
ag.apply_weight_to_update(delta=delta, weight=weight)
ag.add_update(delta=delta, weight=weight)
assertEqual(ag.sum_weights.item(), weight * steps)
ag.zero_weights()
assertEqual(ag.sum_weights.item(), 0)
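    # Expected values in the multiprocess parametrization below: 10 models each
    # filled with 1.0 and per-model weight i + 1, so SUM -> 10.0,
    # WEIGHTED_SUM -> 1 + 2 + ... + 10 = 55.0, and both average types -> 1.0.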
@pytest.mark.parametrize(
"agg_type,num_process,num_models,expected_value",
[
(AggregationType.AVERAGE, 4, 10, 1.0),
(AggregationType.WEIGHTED_AVERAGE, 4, 10, 1.0),
(AggregationType.WEIGHTED_SUM, 4, 10, 55.0),
(AggregationType.SUM, 4, 10, 10.0),
],
)
def test_multiprocess_aggregation(
self, agg_type, num_process, num_models, expected_value
):
model = create_model_with_value(0)
ag = Aggregator(module=model, aggregation_type=agg_type)
results = run_multiprocess_aggregation_test(
ag, num_processes=num_process, num_models=num_models
)
for result in results:
assertAlmostEqual(result, expected_value, places=5)
@pytest.mark.parametrize(
"agg_type,expected_value",
[
(AggregationType.AVERAGE, 1.0),
(AggregationType.WEIGHTED_AVERAGE, 1.0),
(AggregationType.WEIGHTED_SUM, 55.0),
(AggregationType.SUM, 10.0),
],
)
def test_aggregate(self, agg_type, expected_value):
model = create_model_with_value(0)
ag = Aggregator(module=model, aggregation_type=agg_type)
ag.zero_weights()
for i in range(10):
delta = create_model_with_value(1.0)
weight = i + 1
ag.apply_weight_to_update(delta=delta, weight=weight)
ag.add_update(delta=delta, weight=weight)
model = ag.aggregate()
error_msg = model_parameters_equal_to_value(model, expected_value)
assertEmpty(error_msg, msg=error_msg)
@pytest.mark.parametrize(
"agg_type,expected_value",
[
(AggregationType.AVERAGE, 10.0),
(AggregationType.WEIGHTED_AVERAGE, 55.0),
(AggregationType.WEIGHTED_SUM, 55.0),
(AggregationType.SUM, 10.0),
],
)
def test_add_update(self, agg_type, expected_value):
model = create_model_with_value(0)
ag = Aggregator(module=model, aggregation_type=agg_type)
ag.zero_weights()
for i in range(10):
delta = create_model_with_value(1.0)
weight = i + 1
ag.apply_weight_to_update(delta=delta, weight=weight)
ag.add_update(delta=delta, weight=weight)
assertEqual(ag.sum_weights.item(), expected_value)
@pytest.mark.parametrize(
"agg_type,dist_op",
[
(AggregationType.AVERAGE, OperationType.SUM),
(AggregationType.WEIGHTED_AVERAGE, OperationType.SUM),
(AggregationType.WEIGHTED_SUM, OperationType.SUM),
(AggregationType.SUM, OperationType.SUM),
],
)
def test_distributed_op_aggregation(self, agg_type, dist_op):
"""
        Test aggregation with only SUM and no BROADCAST; each worker should then have
different parameters.
"""
model = create_model_with_value(0)
ag = Aggregator(module=model, aggregation_type=agg_type)
results = run_multiprocess_aggregation_test(
ag,
num_processes=4,
num_models=10,
distributed_op=dist_op,
)
for r, v in zip(results, results[1:]):
assertNotEqual(r, v)
| canife-main | FLSim/flsim/servers/tests/test_aggregator.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from flsim.channels.message import Message
from flsim.common.pytest_helper import assertEqual
from flsim.secure_aggregation.secure_aggregator import FixedPointConfig
from flsim.servers.sync_secagg_servers import SyncSecAggServerConfig
from flsim.utils.test_utils import (
SampleNet,
TwoFC,
create_model_with_value,
model_parameters_equal_to_value,
)
from hydra.utils import instantiate
class TestSyncSecAggServer:
def _create_server(self, model, fixedpoint, channel=None):
return instantiate(
SyncSecAggServerConfig(fixedpoint=fixedpoint),
global_model=model,
channel=channel,
)
def test_sync_secagg_server_init(self) -> None:
"""
        Tests whether the secure aggregator object is initialized
"""
model = SampleNet(TwoFC())
# test secure aggregation with flat FP config
fixedpoint = FixedPointConfig(num_bytes=2, scaling_factor=100)
server = self._create_server(model, fixedpoint=fixedpoint)
assertEqual(len(server._secure_aggregator.converters), 4)
assertEqual( # verify an arbitrary layer of the model
server._secure_aggregator.converters["fc2.bias"].scaling_factor, 100
)
def test_secure_aggregator_receive_update_from_client(self) -> None:
"""
Tests whether secure aggregator operations work correctly
when a model update is received and server model is updated
"""
scaling_factor = 100
fixedpoint = FixedPointConfig(num_bytes=2, scaling_factor=scaling_factor)
server = self._create_server(
SampleNet(create_model_with_value(0)), fixedpoint=fixedpoint
)
server.init_round()
m1_param = 7.2345
m1_w = 3.0
model1 = create_model_with_value(m1_param)
server.receive_update_from_client(Message(SampleNet(model1), weight=m1_w))
m2_param = -3.45612
m2_w = 7.0
model2 = create_model_with_value(m2_param)
server.receive_update_from_client(Message(SampleNet(model2), weight=m2_w))
expected_param = float(
round(m1_param * scaling_factor * m1_w + m2_param * scaling_factor * m2_w)
)
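        # The weighted deltas are converted to fixed point and summed by the
        # aggregator; step() converts the sum back to float (divide by
        # scaling_factor) and FedAvg applies the negative weighted average,
        # hence the expected value checked below.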
server.step()
mismatched = model_parameters_equal_to_value(
server.global_model.fl_get_module(),
-(expected_param / scaling_factor) / (m1_w + m2_w),
)
assertEqual(mismatched, "", mismatched)
| canife-main | FLSim/flsim/servers/tests/test_sync_secagg_servers.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import math
from tempfile import mkstemp
from unittest.mock import MagicMock
import numpy as np
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from flsim.channels.base_channel import IdentityChannel
from flsim.channels.half_precision_channel import HalfPrecisionChannel
from flsim.channels.message import Message
from flsim.common.pytest_helper import assertEmpty, assertEqual
from flsim.privacy.common import PrivacySetting
from flsim.privacy.privacy_engine import GaussianPrivacyEngine
from flsim.servers.aggregator import AggregationType
from flsim.servers.sync_dp_servers import SyncDPSGDServerConfig
from flsim.servers.sync_servers import FedAvgOptimizerConfig, SyncServerConfig
from flsim.utils.distributed.fl_distributed import FLDistributedUtils
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.test_utils import (
SampleNet,
create_model_with_value,
model_parameters_equal_to_value,
verify_models_equivalent_after_training,
)
from hydra.utils import instantiate
class TestSyncDPSGDServer:
@classmethod
def _get_num_params(cls, model):
return sum(p.numel() for p in model.parameters())
@classmethod
def init_process(
cls,
rank: int,
world_size: int,
clip: float,
noise: float,
models,
file_loc,
pipe,
use_cuda: bool,
) -> None:
use_cuda = use_cuda and torch.cuda.is_available()
FLDistributedUtils.dist_init(
rank=rank,
world_size=world_size,
init_method=f"file://{file_loc}",
use_cuda=use_cuda,
)
server_model = create_model_with_value(0)
if use_cuda:
server_model.cuda()
server = cls._create_server(
SampleNet(server_model),
clipping_value=clip,
noise_multiplier=noise,
)
server.init_round()
for i, m in enumerate(models):
if i % world_size == rank:
weight = i + 1
if use_cuda:
m.cuda()
server.receive_update_from_client(
Message(model=SampleNet(m), weight=weight)
)
server.step()
sums, weights = 0.0, 0.0
all_sum = [
(p.sum(), p.numel())
for p in server.global_model.fl_get_module().parameters()
]
for s, w in all_sum:
sums += float(s)
weights += float(w)
pipe.send(sums / weights)
dist.destroy_process_group()
@classmethod
def run_multiprocess_server_test(
cls,
clip,
noise,
num_processes: int = 1,
num_models: int = 4,
use_cuda: bool = False,
):
_, tmpfile = mkstemp(dir="/tmp")
pipe_out, pipe_in = mp.Pipe(False)
mp.set_start_method("spawn", force=True)
models = [create_model_with_value(1.0) for i in range(num_models)]
processes = []
results = []
for pid in range(num_processes):
p = mp.Process(
target=cls.init_process,
args=(
pid,
num_processes,
clip,
noise,
models,
tmpfile,
pipe_in,
use_cuda,
),
)
p.start()
processes.append(p)
results.append(pipe_out)
for p in processes:
p.join()
res = [r.recv() for r in results]
return res
@classmethod
def _create_server(
cls,
server_model,
num_rounds: int = 1,
num_clients: int = 1,
clipping_value: float = 1.0,
noise_multiplier: float = 1.0,
channel=None,
):
server = instantiate(
SyncDPSGDServerConfig(
aggregation_type=AggregationType.AVERAGE,
server_optimizer=FedAvgOptimizerConfig(),
privacy_setting=PrivacySetting(
clipping_value=clipping_value,
noise_multiplier=noise_multiplier,
noise_seed=0,
),
),
global_model=server_model,
channel=channel,
)
server.select_clients_for_training(
num_total_users=num_rounds * num_clients, users_per_round=num_clients
)
return server
@pytest.mark.parametrize(
"clipping_value, num_clients", itertools.product([1, 1e10], [1, 10])
)
def test_no_noise_with_clip(self, clipping_value: float, num_clients: int) -> None:
"""
Test DP-SGD with no noise and with user norm clipping.
"""
server_model = SampleNet(create_model_with_value(0))
num_rounds = 10
delta_param = 1.0
num_params = self._get_num_params(server_model.fl_get_module())
clipped_delta = math.sqrt(clipping_value**2 / num_params)
expected_value = -float(min(np.average(clipped_delta), delta_param))
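        # Clipping an all-1.0 delta of L2 norm sqrt(num_params) down to
        # clipping_value makes each parameter sqrt(clipping_value**2 / num_params);
        # the min() keeps the original 1.0 when clipping_value (e.g. 1e10)
        # exceeds the delta norm.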
server = self._create_server(
server_model, num_rounds, num_clients, clipping_value, noise_multiplier=0
)
for round_num in range(num_rounds):
server.init_round()
for _ in range(num_clients):
delta = SampleNet(create_model_with_value(delta_param))
server.receive_update_from_client(Message(model=delta, weight=1.0))
server.step()
error_msg = model_parameters_equal_to_value(
server_model, expected_value * (round_num + 1)
)
assertEmpty(error_msg, msg=error_msg)
@pytest.mark.parametrize(
"clipping_value, noise, num_clients",
itertools.product([1], [1], [1, 10]),
)
def test_noise_and_clip(
self, clipping_value: float, noise: float, num_clients: int
) -> None:
"""
Test user-level DP-SGD.
We assume the following:
1. Server model init at 0
2. Trains for 10 rounds and take the simple average of the client updates.
3. The learning rate = 1.0.
4. The norm of each user delta is greater than the clipping value
The DP-SGD update rule is: w_t = w_t-1 - lr * (avg(grad) + sgd_noise)
With the above assumptions, w_t = 0 - 1.0 * (avg(grad) + sgd_noise) = -(avg(grad) + sgd_noise)
"""
server_model = SampleNet(create_model_with_value(0))
num_rounds = 10
num_params = self._get_num_params(server_model.fl_get_module())
clipped_delta = math.sqrt(clipping_value**2 / num_params)
server = self._create_server(
server_model,
num_rounds,
num_clients,
clipping_value,
noise_multiplier=noise,
)
GaussianPrivacyEngine._generate_noise = MagicMock(
side_effect=lambda size, sensitivity: torch.ones(size) * sensitivity
)
for round_num in range(num_rounds):
server.init_round()
for _ in range(num_clients):
delta = SampleNet(create_model_with_value(1.0))
server.receive_update_from_client(Message(model=delta, weight=1.0))
server.step()
expected_value = float(
-np.average(clipped_delta) - (noise * clipping_value / num_clients)
)
error_msg = model_parameters_equal_to_value(
server_model, expected_value * (round_num + 1)
)
assertEmpty(error_msg, msg=error_msg)
def test_no_noise_no_clip(self) -> None:
"""
Test that DP-SGD server with no clipping and no noise is the same as vanilla SyncServer
"""
global_value = 0
client_value = 1.0
dp_model = SampleNet(create_model_with_value(global_value))
no_dp_model = SampleNet(create_model_with_value(global_value))
num_rounds = 10
num_clients = 10
dp_server = self._create_server(
dp_model,
num_rounds,
num_clients=num_clients,
clipping_value=1e10,
noise_multiplier=0,
)
no_dp_server = instantiate(
SyncServerConfig(
aggregation_type=AggregationType.AVERAGE,
server_optimizer=FedAvgOptimizerConfig(),
),
global_model=no_dp_model,
)
for _ in range(num_rounds):
no_dp_server.init_round()
dp_server.init_round()
for _ in range(num_clients):
dp_server.receive_update_from_client(
Message(
model=SampleNet(
create_model_with_value(global_value - client_value)
),
weight=1.0,
)
)
no_dp_server.receive_update_from_client(
Message(
model=SampleNet(
create_model_with_value(global_value - client_value)
),
weight=1.0,
)
)
dp_server.step()
no_dp_server.step()
error_msg = verify_models_equivalent_after_training(dp_model, no_dp_model)
assertEmpty(error_msg, msg=error_msg)
def test_noise_added_correctly(self) -> None:
"""
Test where noise is a fixed value, 0.8
update = global (all 0) - local (all 2.0) = all 2.0
update norm = sqrt(num_params*delta^2)=sqrt(21*2^2)=sqrt(84)= 9.165
and this will be clipped to clipping_value of 7, which
means that the parameters of the clipped update will be all equal to sqrt(49/21)= 1.52
w_t = w_t-1 - lr * (avg(grad) + sgd_noise)
w_t = 0 - 1.0 * (avg(grad) + sgd_noise) = -(avg(grad) + sgd_noise) = -(1.52 + 0.8) = -2.32
"""
num_clients = 10
clipping_value = 7.0
noise = 0.8
global_value = 0.0
client_value = -2.0
server_model = SampleNet(create_model_with_value(global_value))
num_params = self._get_num_params(server_model.fl_get_module())
clipped_delta = math.sqrt(clipping_value**2 / num_params)
expected_value = float(-np.average(clipped_delta) - noise)
server = self._create_server(
server_model,
num_rounds=1,
num_clients=num_clients,
clipping_value=clipping_value,
noise_multiplier=noise,
)
GaussianPrivacyEngine._generate_noise = MagicMock(return_value=noise)
server.init_round()
for _ in range(num_clients):
delta = create_model_with_value(global_value - client_value)
server.receive_update_from_client(Message(model=SampleNet(delta), weight=1))
server.step()
error_msg = model_parameters_equal_to_value(server_model, expected_value)
assertEmpty(error_msg, msg=error_msg)
@pytest.mark.parametrize(
"channel",
[HalfPrecisionChannel(), IdentityChannel()],
)
def test_dp_server_channel_integration(self, channel) -> None:
"""From Client to Server, the channel should quantize and then dequantize the message
therefore there should be no change in the model
"""
server = self._create_server(
SampleNet(create_model_with_value(0)),
num_rounds=1,
num_clients=10,
clipping_value=10,
noise_multiplier=0,
channel=channel,
)
delta = create_model_with_value(1)
init = FLModelParamUtils.clone(delta)
server.receive_update_from_client(Message(model=SampleNet(delta), weight=1.0))
error_msg = verify_models_equivalent_after_training(delta, init)
assertEmpty(error_msg, msg=error_msg)
@pytest.mark.parametrize(
"noise",
[0, 1],
)
@pytest.mark.parametrize(
"clip",
[1, 10],
)
@pytest.mark.parametrize(
"use_cuda",
[False, True],
)
@pytest.mark.parametrize(
"num_processes",
[1, 2, 4],
)
def test_sync_dp_server_with_multiple_processes(
self, noise, clip, use_cuda: bool, num_processes: int
) -> None:
if use_cuda and not torch.cuda.is_available():
return
expected_result = self.run_multiprocess_server_test(
clip=clip, noise=noise, num_processes=1, num_models=4, use_cuda=use_cuda
)[0]
results = self.run_multiprocess_server_test(
clip=clip,
noise=noise,
num_processes=num_processes,
num_models=4,
use_cuda=use_cuda,
)
for result in results:
assertEqual(expected_result, result)
| canife-main | FLSim/flsim/servers/tests/test_sync_dp_servers.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass
from enum import IntEnum
from typing import Optional
from flsim.channels.base_channel import IdentityChannel
from flsim.interfaces.model import IFLModel
from flsim.reducers.dp_round_reducer import DPRoundReducer, DPRoundReducerConfig
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.fl.common import FLModelParamUtils
from omegaconf import MISSING
from torch import nn
class EstimatorType(IntEnum):
UNBIASED = 0
BIASED = 1
class WeightedDPRoundReducer(DPRoundReducer):
r"""
A differentially private round reducer that allows client models
to provide weighted updates.
    Two estimators are supported, `BIASED` and `UNBIASED`, which only
    differ for average reductions. For sum reductions both sensitivities
are the same and are equivalent to ``max_weight * clipping_value``.
"""
def __init__(
self,
*,
global_model: IFLModel,
num_users_per_round: int,
total_number_of_users: int,
channel: Optional[IdentityChannel] = None,
name: Optional[str] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=WeightedDPRoundReducerConfig,
**kwargs,
)
super().__init__(
global_model=global_model,
num_users_per_round=num_users_per_round,
total_number_of_users=total_number_of_users,
channel=channel,
name=name,
**kwargs,
)
assert self.is_weighted, "Please use DPRoundReducer for unweighted cases"
# pyre-fixme[16]: `WeightedDPRoundReducer` has no attribute `cfg`.
self.min_weight = self.cfg.min_weight
self.max_weight = self.cfg.max_weight
self.mean_weight = self.cfg.mean_weight
self.estimator_type = self.cfg.estimator_type
self._check_boundaries()
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _check_boundaries(self):
"""
Checks for min, max, and mean values of config
"""
if (
self.min_weight < 0
or self.max_weight < self.min_weight
or self.mean_weight < self.min_weight
or self.mean_weight > self.max_weight
):
self.logger.error("Weight boundaries in config are not defined properly")
if self.estimator_type == EstimatorType.UNBIASED and self.mean_weight <= 0:
self.logger.error(
"For unbiased sensitivity estimation mean_weight needs to be positive."
)
def clamp_weight(self, weight: float) -> float:
if not self.min_weight <= weight <= self.max_weight:
self.logger.error(
"min/max client weight boundaries are violated!"
"Client weight is being adjusted"
)
weight = max(min(weight, self.max_weight), self.min_weight)
return weight
def update_reduced_module(self, delta_module: nn.Module, weight: float) -> None:
weight = self.clamp_weight(weight)
super().update_reduced_module(delta_module, weight)
def check_total_weight(self, total_weight: float):
r"""
Boundary check for total weights.
"""
lower_bound = self.num_users_per_round * self.min_weight
upper_bound = self.num_users_per_round * self.max_weight
is_bounded = lower_bound <= total_weight <= upper_bound
if not is_bounded:
self.logger.error(
f"Summed weights {total_weight} do not fall within expected range [{lower_bound}, {upper_bound}]"
)
def sensitivity(self, total_weight: float):
r"""
Calculates the sensitivity of the final result.
Note:
            For weighted averaging with the BIASED estimator, this call may rescale
            the reduced result in place in order to decrease its sensitivity.
"""
self.check_total_weight(total_weight)
if not self.is_averaged:
return self._sum_estimator()
elif self.estimator_type == EstimatorType.UNBIASED:
return self._unbiased_estimator()
else:
return self._biased_estimator(total_weight=total_weight)
def _sum_estimator(self) -> float:
return self.clipping_value * self.max_weight
def _unbiased_estimator(self) -> float:
"""
        For weighted-average reductions, the unbiased estimator computes the true
        weighted average of the models; its sensitivity is:
(clipping_value * max_weight) / (min_weight * users_per_round)
"""
        return (self.clipping_value * self.max_weight) / (
            self.num_users_per_round * self.min_weight
        )
def _biased_estimator(self, total_weight: float) -> float:
"""
        For the biased estimator, the average is computed as the weighted sum of the
        models divided by max(num_clients_per_round * mean_weight, total_weight),
        which gives a sensitivity of
        (clipping_value * max_weight) / (mean_weight * num_clients_per_round).
"""
weight_modifier = total_weight / max(
total_weight, self.mean_weight * self.num_users_per_round
)
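        # Rescale the accumulated sum in place by
        # total_weight / max(total_weight, mean_weight * num_users_per_round),
        # so that the later division by total_weight yields the biased average
        # described in the docstring above.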
FLModelParamUtils.linear_comb_models(
self.reduced_module,
weight_modifier,
self.reduced_module,
0.0,
self.reduced_module,
# pyre-fixme[16]: `WeightedDPRoundReducer` has no attribute `cfg`.
only_federated_params=self.cfg.only_federated_params,
)
return (
self.clipping_value
* self.max_weight
/ (self.num_users_per_round * self.mean_weight)
)
@dataclass
class WeightedDPRoundReducerConfig(DPRoundReducerConfig):
r"""
    Contains configurations for a differentially private round reducer that
    also allows for weighted updates.
Note:
Allowing weights in dp should generally be avoided unless weights
are in the same range. If weights are extremely different, one
        might as well throw away updates from clients with smaller weights,
        as they will be drowned out by the noise.
"""
_target_: str = fullclassname(WeightedDPRoundReducer)
min_weight: float = 1e-6
max_weight: float = float("inf")
mean_weight: float = 1e-6
estimator_type: EstimatorType = EstimatorType.UNBIASED
@dataclass
class DeltaDirectionWeightedDPReducerConfig(WeightedDPRoundReducerConfig):
_target_: str = MISSING
| canife-main | FLSim/flsim/reducers/weighted_dp_round_reducer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore # @manual
from .base_round_reducer import RoundReducerConfig
from .dp_round_reducer import DPRoundReducerConfig
from .weighted_dp_round_reducer import WeightedDPRoundReducerConfig
ConfigStore.instance().store(
name="base_reducer",
node=RoundReducerConfig,
group="reducer",
)
ConfigStore.instance().store(
name="base_dp_reducer",
node=DPRoundReducerConfig,
group="reducer",
)
ConfigStore.instance().store(
name="base_weighted_dp_reducer",
node=WeightedDPRoundReducerConfig,
group="reducer",
)
| canife-main | FLSim/flsim/reducers/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This file defines the concept of a base round aggregator for a
federated learning setting, along with its basic config. The aggregator
is responsible only for efficient state_dict gathering and, potentially,
user-level differential privacy later on. It is distinct from the server,
which actually updates the global model.
"""
from __future__ import annotations
import abc
import logging
from dataclasses import dataclass
from enum import IntEnum
from itertools import chain
from typing import Optional, Tuple
import torch
from flsim.channels.base_channel import IdentityChannel
from flsim.channels.message import Message
from flsim.common.logger import Logger
from flsim.interfaces.model import IFLModel
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.distributed.fl_distributed import FLDistributedUtils, OperationType
from flsim.utils.fl.common import FLModelParamUtils
from omegaconf import MISSING
from torch import nn
class ReductionType(IntEnum):
"""
Type of reduction for the RoundReducer
"""
AVERAGE = 0
SUM = 1
WEIGHTED_AVERAGE = 2
WEIGHTED_SUM = 3
class ReductionPrecision(IntEnum):
"""
Defines the precision of the aggregated module.
It can help a little bit if the reduced module has a double precision.
DEFAULT keeps the precision of the reference module.
"""
DEFAULT = 0
FLOAT = 1
DOUBLE = 2
@property
def dtype(self) -> Optional[torch.dtype]:
return (
torch.float64
if self == ReductionPrecision.DOUBLE
else torch.float32
if self == ReductionPrecision.FLOAT
else None # ReductionPrecision.DEFAULT
)
class IFLRoundReducer(abc.ABC):
"""
Interface for RoundReducers.
"""
logger: logging.Logger = Logger.get_logger(__name__)
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=IFLRoundReducerConfig,
**kwargs,
)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@abc.abstractmethod
def collect_update(self, delta: IFLModel) -> None:
"""
Given a updated model from the client, add it to this reducer.
"""
pass
@abc.abstractmethod
def reduce(self):
"""
Reduce all the updates collected thus far and return the results.
"""
pass
@abc.abstractmethod
def reset(self) -> None:
"""
Initializes / Resets round reducers internals.
"""
pass
@dataclass
class IFLRoundReducerConfig:
_target_: str = MISSING
_recursive_: bool = False
only_federated_params: bool = True
class RoundReducer(IFLRoundReducer):
"""
Base Class for an aggregator which gets parameters
from different clients and aggregates them together.
"""
logger: logging.Logger = Logger.get_logger(__name__)
def __init__(
self,
*,
global_model: IFLModel,
num_users_per_round: Optional[int] = None,
total_number_of_users: Optional[int] = None,
channel: Optional[IdentityChannel] = None,
name: Optional[str] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=RoundReducerConfig,
**kwargs,
)
super().__init__(**kwargs)
# pyre-fixme[16]: `RoundReducer` has no attribute `cfg`.
self.dtype = self.cfg.precision.dtype
self.channel = channel or IdentityChannel()
self.name = name or "unnamed_aggregator"
self.num_users_per_round = num_users_per_round
self.total_number_of_users = total_number_of_users
# TODO these are specific to mean reducer [this implementation]
        # we will probably need a level of inheritance here and hide
        # these from the main class.
self.sum_weights: torch.Tensor = torch.zeros(1)
self.ref_model: IFLModel = global_model
self.reduced_module: nn.Module = FLModelParamUtils.clone(
global_model.fl_get_module(), self.dtype
)
self._zero_weights()
def set_num_total_users(self, num_total_users):
self.total_number_of_users = num_total_users
def collect_update(self, delta: IFLModel, weight: float) -> None:
# 0. Receive delta from client through channel
delta = self.receive_through_channel(delta)
# 1. reduce the delta into local state
self.update_reduced_module(delta.fl_get_module(), weight)
def _reduce_all(self, op: OperationType = OperationType.SUM_AND_BROADCAST):
"""
        Reduce models across all workers if multi-processing is used.
        The reduction type is defined by `config.reduction_type`; see
        `ReductionType`.
        Returns:
            The number of models that have been collected; for weighted
            reduction types, the sum of all model weights.
        Note:
            The weight is the sum of client weights only for weighted reduction
            types (see `ReductionType`); for simple reductions it is the number
            of models that have been reduced.
"""
state_dict = FLModelParamUtils.get_state_dict(
self.reduced_module,
# pyre-fixme[16]: `RoundReducer` has no attribute `cfg`.
only_federated_params=self.cfg.only_federated_params,
)
FLDistributedUtils.distributed_operation(
chain([self.sum_weights], state_dict.values()), op
)
if self.sum_weights.item() <= 0:
return 0.0
total_weight = float(self.sum_weights.item())
if self.is_averaged:
# reduced_module = reduced_module / total_weight
FLModelParamUtils.multiply_model_by_weight(
model=self.reduced_module,
weight=1 / total_weight,
model_to_save=self.reduced_module,
only_federated_params=self.cfg.only_federated_params,
)
def receive_through_channel(self, model: IFLModel) -> IFLModel:
"""
Receives a reference to a state (referred to as model state_dict)
over the channel. Any channel effect is applied as part of this
receive function.
"""
message = self.channel.client_to_server(Message(model))
return message.model
@property
def current_results(self) -> Tuple[nn.Module, float]:
return self.reduced_module, float(self.sum_weights.item())
def reduce(self) -> Tuple[nn.Module, float]:
self._reduce_all()
return self.reduced_module, float(self.sum_weights.item())
def reset(self, ref_model: IFLModel) -> None:
"""
Initializes / Resets round reducers internals.
"""
self.ref_model = ref_model
self._zero_weights()
def _zero_weights(self):
"""
Reset parameters and weights to zero
"""
FLModelParamUtils.zero_weights(
self.reduced_module, only_federated_params=self.cfg.only_federated_params
)
device = next(self.reduced_module.parameters()).device
self.sum_weights = torch.zeros(1, device=device, dtype=self.dtype)
def update_reduced_module(self, delta_module: nn.Module, weight: float) -> None:
        # TODO: num_samples is used as the default weight; this needs revisiting
if not self.is_weighted:
weight = 1.0
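        # reduced_module <- 1.0 * reduced_module + weight * delta_module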
FLModelParamUtils.linear_comb_models(
self.reduced_module,
1.0,
delta_module,
weight,
self.reduced_module,
# pyre-fixme[16]: `RoundReducer` has no attribute `cfg`.
only_federated_params=self.cfg.only_federated_params,
)
self.sum_weights += weight
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(
"L1 norm of aggregated parameters:",
sum(p.abs().sum() for p in self.reduced_module.parameters()),
)
@property
def is_weighted(self):
return self.cfg.reduction_type in (
ReductionType.WEIGHTED_SUM,
ReductionType.WEIGHTED_AVERAGE,
)
@property
def is_averaged(self):
return self.cfg.reduction_type in (
ReductionType.WEIGHTED_AVERAGE,
ReductionType.AVERAGE,
)
@dataclass
class RoundReducerConfig(IFLRoundReducerConfig):
_target_: str = fullclassname(RoundReducer)
reduction_type: ReductionType = ReductionType.WEIGHTED_AVERAGE
precision: ReductionPrecision = ReductionPrecision.DEFAULT
| canife-main | FLSim/flsim/reducers/base_round_reducer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass
from itertools import chain
from typing import Optional, Tuple
from flsim.channels.base_channel import IdentityChannel
from flsim.interfaces.model import IFLModel
from flsim.privacy.common import PrivacyBudget, PrivacySetting
from flsim.privacy.privacy_engine import IPrivacyEngine
from flsim.privacy.privacy_engine_factory import NoiseType, PrivacyEngineFactory
from flsim.privacy.user_update_clip import UserUpdateClipper
from flsim.reducers.base_round_reducer import RoundReducer, RoundReducerConfig
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.distributed.fl_distributed import FLDistributedUtils, OperationType
from flsim.utils.fl.common import FLModelParamUtils
from torch import nn
class DPRoundReducer(RoundReducer):
def __init__(
self,
*,
global_model: IFLModel,
num_users_per_round: int,
total_number_of_users: int,
channel: Optional[IdentityChannel] = None,
name: Optional[str] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=DPRoundReducerConfig,
**kwargs,
)
super().__init__(
global_model=global_model,
num_users_per_round=num_users_per_round,
total_number_of_users=total_number_of_users,
channel=channel,
name=name,
**kwargs,
)
self.num_users_per_round = num_users_per_round
self.privacy_on = (
# pyre-ignore[16]
self.cfg.privacy_setting.noise_multiplier >= 0
and self.cfg.privacy_setting.clipping_value < float("inf")
)
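        # DP is active only when the noise multiplier is non-negative and the
        # clipping value is finite; otherwise this reducer behaves like the
        # plain RoundReducer.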
self.clipping_value = self.cfg.privacy_setting.clipping_value
self.user_update_clipper = UserUpdateClipper(self.dtype)
if self.privacy_on:
self.privacy_engine: IPrivacyEngine = PrivacyEngineFactory.create(
self.cfg.privacy_setting,
num_users_per_round,
total_number_of_users,
noise_type=self.cfg.noise_type,
)
self.privacy_engine.attach(global_model=self.ref_model.fl_get_module())
self._privacy_budget = PrivacyBudget(
delta=self.cfg.privacy_setting.target_delta
)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def update_reduced_module(self, delta_module: nn.Module, weight: float) -> None:
"""
Please refer to ``RoundReducer.update_reduced_module`` for more info.
        Notes
        -----
        When privacy is on, the client delta is clipped in place to
        ``clipping_value`` before it is accumulated.
"""
if self.privacy_on:
self.user_update_clipper.clip(delta_module, self.clipping_value)
super().update_reduced_module(delta_module, weight)
def reduce(self) -> Tuple[nn.Module, float]:
if not self.privacy_on:
return super().reduce()
# only sum in the rank 0 reducer (no broadcast yet)
        self._reduce_all(OperationType.SUM)
self.logger.debug(f"Sum of weights after aggregation: {self.sum_weights}")
if FLDistributedUtils.is_master_worker():
total_weights = float(self.sum_weights.item())
if abs(total_weights - self.num_users_per_round) > 1e-5:
self.logger.error(
f"total weights {total_weights} is not equal to "
f"number of users {self.num_users_per_round}. "
"Please make sure reduction_type=AVERGAE."
)
"""
The final amount of noise added must be equal to
(max_norm * noise_multiplier) / users_per_round, similar to
Google's user-level DP https://arxiv.org/pdf/1710.06963.pdf.
            Note that the noise_multiplier is already factored in inside the
            _generate_noise() function, so only the per-round sensitivity
            clipping_value / total_weights is passed here.
"""
self.privacy_engine.add_noise(
self.reduced_module, self.clipping_value / total_weights
)
# broadcast the new noisy model to all workers.
state_dict = FLModelParamUtils.get_state_dict(
self.reduced_module,
# pyre-fixme[16]: `DPRoundReducer` has no attribute `cfg`.
only_federated_params=self.cfg.only_federated_params,
)
FLDistributedUtils.distributed_operation(
chain([self.sum_weights], state_dict.values()), OperationType.BROADCAST
)
self.logger.debug(
f"Sum of client weights after reduction on worker: {self.sum_weights}"
)
self._privacy_budget = self.privacy_engine.get_privacy_spent()
self.logger.info(f"User Privacy Budget: {self._privacy_budget}")
return self.reduced_module, float(self.sum_weights.item())
@property
def privacy_budget(self) -> PrivacyBudget:
return self._privacy_budget
def reset(self, ref_model: IFLModel) -> None:
super().reset(ref_model)
if self.privacy_on:
self.privacy_engine.attach(global_model=self.ref_model.fl_get_module())
@dataclass
class DPRoundReducerConfig(RoundReducerConfig):
"""
Contains configurations for a round reducer based on DP (user-level dp)
"""
_target_: str = fullclassname(DPRoundReducer)
privacy_setting: PrivacySetting = PrivacySetting()
noise_type: NoiseType = NoiseType.GAUSSIAN
| canife-main | FLSim/flsim/reducers/dp_round_reducer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
from tempfile import mkstemp
from typing import List, Type
from unittest.mock import MagicMock
import numpy as np
import pytest
import torch.distributed as dist
import torch.multiprocessing as mp
from flsim.clients.base_client import Client, ClientConfig
from flsim.common.pytest_helper import (
assertAlmostEqual,
assertEqual,
assertFalse,
assertIsInstance,
assertNotEqual,
assertTrue,
)
from flsim.privacy.common import PrivacySetting
from flsim.reducers.base_round_reducer import (
ReductionType,
RoundReducer,
RoundReducerConfig,
)
from flsim.reducers.dp_round_reducer import DPRoundReducer, DPRoundReducerConfig
from flsim.reducers.weighted_dp_round_reducer import (
EstimatorType,
WeightedDPRoundReducer,
WeightedDPRoundReducerConfig,
)
from flsim.utils import test_utils as utils
from flsim.utils.async_trainer.async_example_weights import EqualExampleWeightConfig
from flsim.utils.async_trainer.async_staleness_weights import (
PolynomialStalenessWeightConfig,
)
from flsim.utils.async_trainer.async_weights import AsyncWeight, AsyncWeightConfig
from flsim.utils.distributed.fl_distributed import FLDistributedUtils
from flsim.utils.fl.common import FLModelParamUtils
from hydra.utils import instantiate
from omegaconf import OmegaConf
def create_ref_model(ref_model_param_value):
ref_model = utils.SampleNet(utils.TwoFC())
ref_model.fl_get_module().fill_all(ref_model_param_value)
return ref_model
def get_dp_round_reducer(
ref_model=None,
clipping_value: float = 99999.99,
reduction_type=ReductionType.AVERAGE,
noise_multiplier: int = 0,
num_users_per_round: int = 1,
total_number_of_users: int = 1,
reset: bool = True,
):
ref_model = ref_model or utils.SampleNet(utils.TwoFC())
privacy_setting = PrivacySetting(
noise_multiplier=noise_multiplier, clipping_value=clipping_value
)
dp_rr = DPRoundReducer(
**OmegaConf.structured(
DPRoundReducerConfig(
reduction_type=reduction_type, privacy_setting=privacy_setting
)
),
global_model=ref_model,
num_users_per_round=num_users_per_round,
total_number_of_users=total_number_of_users,
)
if reset:
dp_rr.reset(ref_model)
return dp_rr
def init_process(
rank, world_size, reducer, models, file_loc, pipe, backend: str = "gloo"
) -> None:
"""
    Initialize the distributed environment for multi-process testing, run a
    simple reduction task, and return the mean of all the parameters in the
    reduced module.
"""
dist.init_process_group(
backend, init_method=f"file://{file_loc}", rank=rank, world_size=world_size
)
reducer.reset(models[0])
for i, m in enumerate(models):
if i % world_size == rank:
reducer.collect_update(m, float(i + 1))
reducer.reduce()
sums, weights = 0.0, 0.0
all_sum = [(p.sum(), p.numel()) for p in reducer.reduced_module.parameters()]
for s, w in all_sum:
sums += float(s)
weights += float(w)
pipe.send(sums / weights)
dist.destroy_process_group()
def run_reduction_test(reducer, num_processes: int = 1, num_models: int = 4):
"""
Used in multiprocess test only.
Runs a simple scenario in multiple processes.
    Models are sequentially initialized with
a constant value for params, i.e. 1, 2, ..., num_models.
"""
_, tmpfile = mkstemp(dir="/tmp")
pipe_out, pipe_in = mp.Pipe(False)
# reducer.reduced_module.share_memory()
models = [utils.SampleNet(utils.TwoFC()) for _ in range(num_models)]
for i, m in enumerate(models):
m.fl_get_module().fill_all(float(i + 1))
processes = []
results = []
FLDistributedUtils.WORLD_SIZE = num_processes
for pid in range(num_processes):
p = mp.Process(
target=init_process,
args=(pid, num_processes, reducer, models, tmpfile, pipe_in),
)
p.start()
processes.append(p)
results.append(pipe_out)
for p in processes:
p.join()
res = [r.recv() for r in results]
return res
class TestRoundReducerBase:
def _fake_client(self, global_value, client_value, weight):
clnt = Client(dataset=None, **OmegaConf.structured(ClientConfig()))
def fill(model, *args):
model.fl_get_module().fill_all(global_value - client_value)
return model, weight
clnt.generate_local_update = MagicMock(side_effect=fill)
return clnt
def _create_fake_clients(
self, global_param_value, num_clients, client_param_value, client_weight
) -> List[Client]:
# initialize clients, each with model parameters equal to client_param_values
return [
self._fake_client(
global_value=global_param_value,
client_value=client_param_value,
weight=client_weight,
)
for _ in range(num_clients)
]
class TestRoundReducer(TestRoundReducerBase):
def get_round_reducer(
self,
model=None,
reduction_type=ReductionType.WEIGHTED_AVERAGE,
reset: bool = True,
):
model = model or utils.SampleNet(utils.TwoFC())
round_reducer = RoundReducer(
**OmegaConf.structured(RoundReducerConfig(reduction_type=reduction_type)),
global_model=model,
)
return round_reducer
def test_reset(self) -> None:
rr = self.get_round_reducer()
mismatched = utils.model_parameters_equal_to_value(rr.reduced_module, 0.0)
assertEqual(mismatched, "", mismatched)
# do it again
rr.reduced_module.fill_all(1.0)
rr.reset(utils.SampleNet(utils.TwoFC()))
mismatched = utils.model_parameters_equal_to_value(rr.reduced_module, 0.0)
assertEqual(mismatched, "", mismatched)
def test_receive_through_channel(self) -> None:
        # check expected channel effects (currently an identity pass-through)
rr = self.get_round_reducer()
model = utils.SampleNet(utils.TwoFC())
# check channel is pass through
        # TODO: modify when there is an actual (non-identity) channel
model2 = rr.receive_through_channel(model)
mismatched = utils.verify_models_equivalent_after_training(model2, model)
assertEqual(mismatched, "", mismatched)
def test_update_reduced_module(self) -> None:
model = utils.SampleNet(utils.TwoFC())
rr = self.get_round_reducer(model)
model.fl_get_module().fill_all(0.2)
rr.update_reduced_module(model.fl_get_module(), 3.0)
model.fl_get_module().fill_all(0.3)
rr.update_reduced_module(model.fl_get_module(), 2.0)
mismatched = utils.model_parameters_equal_to_value(
rr.reduced_module, 3 * 0.2 + 2 * 0.3
)
assertEqual(mismatched, "", mismatched)
def test_collect_update(self) -> None:
param_values = [0.1 * i for i in range(100)]
weights = [i % 10 for i in range(100)]
global_param = 1.0
clients = [
self._fake_client(global_param, p, w) for p, w in zip(param_values, weights)
]
ref_model = utils.SampleNet(utils.TwoFC())
rr = self.get_round_reducer(ref_model)
for clnt in clients:
model, weight = clnt.generate_local_update(utils.SampleNet(utils.TwoFC()))
rr.collect_update(model, weight)
expected_sum_weights = sum(weights)
expected_param_values = sum(
(global_param - param_value) * w
for param_value, w in zip(param_values, weights)
)
experiment_model, experiment_weight = rr.current_results
assertAlmostEqual(expected_sum_weights, experiment_weight, 5)
mismatched = utils.model_parameters_equal_to_value(
experiment_model, expected_param_values
)
assertEqual(mismatched, "", mismatched)
def test_reduction_types_sum(self) -> None:
model = utils.SampleNet(utils.TwoFC())
rr = self.get_round_reducer(model, reduction_type=ReductionType.SUM)
results = run_reduction_test(rr, num_processes=1, num_models=2)
value_expected = float(sum(i + 1 for i in range(2)))
for r in results:
assertAlmostEqual(r, value_expected, 5)
def test_reduction_types_avg(self) -> None:
model = utils.SampleNet(utils.TwoFC())
rr = self.get_round_reducer(model, reduction_type=ReductionType.AVERAGE)
results = run_reduction_test(rr, num_processes=2, num_models=4)
value_expected = sum(i + 1 for i in range(4)) / 4
for r in results:
assertAlmostEqual(r, value_expected, 5)
def test_reduction_types_weighted_sum(self) -> None:
model = utils.SampleNet(utils.TwoFC())
rr = self.get_round_reducer(model, reduction_type=ReductionType.WEIGHTED_SUM)
results = run_reduction_test(rr, num_processes=3, num_models=6)
value_expected = float(sum((i + 1) ** 2 for i in range(6)))
for r in results:
assertAlmostEqual(r, value_expected, 5)
def test_reduction_types_weighted_avg(self) -> None:
model = utils.SampleNet(utils.TwoFC())
rr = self.get_round_reducer(
model, reduction_type=ReductionType.WEIGHTED_AVERAGE
)
results = run_reduction_test(rr, num_processes=4, num_models=8)
value_expected = float(sum((i + 1) ** 2 for i in range(8))) / sum(
i + 1 for i in range(8)
)
for r in results:
assertAlmostEqual(r, value_expected, 5)
def test_multiprocess_reduce(self) -> None:
# test multi processing.
model = utils.SampleNet(utils.TwoFC())
num_models = 4
value_expected = float(sum((i + 1) ** 2 for i in range(num_models))) / sum(
i + 1 for i in range(num_models)
)
# test 1 process
r1 = self.get_round_reducer(model, reset=False)
results = run_reduction_test(r1, num_processes=1, num_models=num_models)
for r in results:
assertAlmostEqual(r, value_expected, 5)
# test 4 processes
r2 = self.get_round_reducer(model)
results = run_reduction_test(r2, num_processes=2, num_models=num_models)
for r in results:
assertAlmostEqual(r, value_expected, 5)
def test_logging_level(self) -> None:
rr = self.get_round_reducer()
assertTrue(utils.check_inherit_logging_level(rr, 50))
assertTrue(utils.check_inherit_logging_level(rr, 10))
class TestDPRoundReducer(TestRoundReducerBase):
def test_dp_off(self) -> None:
ref_model = create_ref_model(ref_model_param_value=3.0)
        # when clipping_value is inf, sensitivity is inf -> DP not supported
dp_rr = get_dp_round_reducer(
ref_model, clipping_value=float("inf"), noise_multiplier=0
)
assertFalse(dp_rr.privacy_on)
# noise < 0 means no dp
dp_rr = get_dp_round_reducer(
ref_model, clipping_value=10.0, noise_multiplier=-1
)
assertFalse(dp_rr.privacy_on)
def test_collect_update_with_clipping(self) -> None:
"""
Tests whether the model updates associated with the new models sent
from clients are clipped correctly.
"""
num_clients = 100
global_value = 5.0
clients = self._create_fake_clients(
global_param_value=global_value,
num_clients=num_clients,
client_param_value=3.0,
client_weight=1.0,
)
ref_model = create_ref_model(ref_model_param_value=global_value)
dp_rr = get_dp_round_reducer(
ref_model,
clipping_value=6.0,
num_users_per_round=num_clients,
total_number_of_users=num_clients,
)
for client in clients:
delta, weight = client.generate_local_update(utils.SampleNet(utils.TwoFC()))
dp_rr.collect_update(delta, weight)
"""
delta = global (all 5.0) - local (all 3.0) = all 2.0
delta norm = sqrt(num_params*delta^2)=sqrt(21*2^2)=sqrt(84)= 9.16515138991168
and this will be clipped to clipping_value of 6.0, which
means that the parameters of the clipped update will be all equal
to sqrt(36/21)= 1.309307341415954
"""
expected_param_values = (1.309307341415954) * num_clients
collected_model_updates, _ = dp_rr.current_results
mismatched = utils.model_parameters_equal_to_value(
collected_model_updates, expected_param_values
)
assertEqual(mismatched, "", mismatched)
def test_clipping_when_noise_zero(self) -> None:
"""
Tests when noise multiplier is zero, calling add_noise() in reduce()
does not change the model after clipping.
"""
num_clients = 50
global_value = 8.0
clients = self._create_fake_clients(
global_param_value=global_value,
num_clients=num_clients,
client_param_value=2.0,
client_weight=1.0,
)
ref_model = create_ref_model(ref_model_param_value=global_value)
dp_rr = get_dp_round_reducer(
ref_model,
clipping_value=15.0,
noise_multiplier=0,
num_users_per_round=num_clients,
total_number_of_users=num_clients,
)
for client in clients:
delta, weight = client.generate_local_update(utils.SampleNet(utils.TwoFC()))
dp_rr.collect_update(delta, weight)
"""
update = global (all 8.0) - local (all 2.0) = all 6.0
update norm = sqrt(num_params*delta^2)=sqrt(21*6^2)=sqrt(756)= 27.49545416973504
and this will be clipped to clipping_value of 15, which
means that the parameters of the clipped update will be all equal
to sqrt(15^2/21)= 3.273268353539886
"""
dp_rr.reduce()
        # asserts calling add_noise did not change anything
expected_param_values = ((3.273268353539886 * num_clients) / num_clients) + 0
model_after_noise, sum_weights = dp_rr.current_results
mismatched = utils.model_parameters_equal_to_value(
model_after_noise, expected_param_values
)
assertEqual(mismatched, "", mismatched)
def test_noise_when_clipping_large_value(self) -> None:
"""
Tests 2 things: 1) whether clipping does not happen when
clipping threshold is set to a large value, 2) whether we get
a different model when we add noise and clipping is ineffective
(clipping threshold is set to a large value).
"""
num_clients = 20
global_value = 5.0
clients = self._create_fake_clients(
global_param_value=global_value,
num_clients=num_clients,
client_param_value=3.0,
client_weight=1.0,
)
ref_model = create_ref_model(ref_model_param_value=global_value)
ref_model_before = FLModelParamUtils.clone(ref_model)
dp_rr = get_dp_round_reducer(
ref_model,
clipping_value=10.0,
num_users_per_round=num_clients,
total_number_of_users=num_clients,
)
for client in clients:
delta, weight = client.generate_local_update(utils.SampleNet(utils.TwoFC()))
dp_rr.collect_update(delta, weight)
"""
update = global (all 5.0) - local (all 3.0) = all 2.0
update norm = sqrt(num_params*delta^2)=sqrt(21*2^2)=sqrt(84)= 9.16515138991168
and this will not be clipped, because the clipping_value
is set as a larger value (10 > 9.16515138991168). So the parameters
of the model update will not change and all be equal to 2.
"""
# asserts clipping does not happen
expected_param_values = 2.0 * num_clients
collected_model_updates, _ = dp_rr.current_results
mismatched = utils.model_parameters_equal_to_value(
collected_model_updates, expected_param_values
)
assertEqual(mismatched, "", mismatched)
dp_rr.reduce()
ref_module_after_noise, _ = dp_rr.current_results
# asserts by adding noise, we get a different model
mismatched = utils.verify_models_equivalent_after_training(
ref_model_before.fl_get_module(), ref_module_after_noise
)
assertNotEqual(mismatched, "")
def test_noise_added_correctly(self) -> None:
"""
Tests that the noise is added correctly to model.
"""
num_clients = 100
global_value = 5.0
clients = self._create_fake_clients(
global_param_value=global_value,
num_clients=num_clients,
client_param_value=3.0,
client_weight=1.0,
)
ref_model = create_ref_model(ref_model_param_value=global_value)
dp_rr = get_dp_round_reducer(
ref_model,
clipping_value=7.0,
num_users_per_round=num_clients,
total_number_of_users=num_clients,
)
for client in clients:
delta, weight = client.generate_local_update(utils.SampleNet(utils.TwoFC()))
client.compute_delta(ref_model, delta, delta)
dp_rr.collect_update(delta, weight)
"""
update = global (all 5.0) - local (all 3.0) = all 2.0
update norm = sqrt(num_params*delta^2)=sqrt(21*2^2)=sqrt(84)= 9.16515138991168
and this will be clipped to clipping_value of 7, which
means that the parameters of the clipped update will be all equal
to sqrt(49/21)= 1.527525231651947
"""
dp_rr.privacy_engine._generate_noise = MagicMock(return_value=0.8)
expected_param_values = ((1.527525231651947 * num_clients) / num_clients) + 0.8
dp_rr.reduce()
ref_module_after_noise, _ = dp_rr.current_results
mismatched = utils.model_parameters_equal_to_value(
ref_module_after_noise, expected_param_values
)
assertEqual(mismatched, "", mismatched)
def test_multiprocess_dp_all_processes_the_same(self) -> None:
# test multi processing.
model = utils.SampleNet(utils.TwoFC())
num_models = 4
# test 4 processes
r = get_dp_round_reducer(
model,
clipping_value=1.0,
reduction_type=ReductionType.AVERAGE,
noise_multiplier=1,
num_users_per_round=4,
total_number_of_users=4,
reset=False,
)
results = run_reduction_test(r, num_processes=4, num_models=num_models)
same_value = results[0]
for r in results:
assertAlmostEqual(r, same_value, 5)
@dataclass
class WeightReducerTestSetting:
num_clients: int = 10
clients_per_round: int = 5
clipping_value: float = 1.0
max_staleness: int = 100
noise: float = 1.0
max_weight: float = 1.0
min_weight: float = 0.0
mean_weight: float = 0.5
class TestWeightedDPRoundReducer(TestRoundReducerBase):
def _get_reducer(
self,
ref_model=None,
clipping_value: float = 1e10,
reduction_type=ReductionType.WEIGHTED_SUM,
estimator_type=EstimatorType.UNBIASED,
noise_multiplier: int = 0,
num_users_per_round: int = 1,
total_number_of_users: int = 1,
max_weight: float = 10,
min_weight: float = 1e-6,
mean_weight: float = 1e-6,
):
ref_model = ref_model or utils.SampleNet(utils.TwoFC())
privacy_setting = PrivacySetting(
noise_multiplier=noise_multiplier, clipping_value=clipping_value
)
reducer = WeightedDPRoundReducer(
**OmegaConf.structured(
WeightedDPRoundReducerConfig(
reduction_type=reduction_type,
privacy_setting=privacy_setting,
estimator_type=estimator_type,
max_weight=max_weight,
min_weight=min_weight,
mean_weight=mean_weight,
)
),
global_model=ref_model,
num_users_per_round=num_users_per_round,
total_number_of_users=total_number_of_users,
)
return reducer
def _get_async_weight(self, exponent, avg_staleness: int = 0):
return AsyncWeight(
**OmegaConf.structured(
AsyncWeightConfig(
staleness_weight=PolynomialStalenessWeightConfig(
exponent=exponent, avg_staleness=avg_staleness
),
example_weight=EqualExampleWeightConfig(),
)
)
)
def _get_num_params(self, model):
return sum(p.numel() for p in model.parameters())
def _reduce_weighted_models(
self,
global_model,
settings: WeightReducerTestSetting,
reduction_type: ReductionType,
estimator_type: EstimatorType = EstimatorType.UNBIASED,
global_param: float = 1.0,
client_param: float = 1.0,
client_weight: float = 1.0,
):
clients = self._create_fake_clients(
global_param_value=global_param,
num_clients=settings.num_clients,
client_param_value=client_param,
client_weight=client_weight,
)
reducer = self._get_reducer(
global_model,
reduction_type=reduction_type,
clipping_value=settings.clipping_value,
estimator_type=estimator_type,
num_users_per_round=settings.clients_per_round,
total_number_of_users=settings.num_clients,
max_weight=settings.max_weight,
min_weight=settings.min_weight,
mean_weight=settings.mean_weight,
)
async_weight = self._get_async_weight(exponent=0.5)
weights = []
for client in clients:
delta, model_weight = client.generate_local_update(
utils.SampleNet(utils.TwoFC())
)
staleness = np.random.randint(1, settings.max_staleness)
weight = async_weight.weight(num_examples=model_weight, staleness=staleness)
assertTrue(0.0 <= weight <= 1.0)
reducer.collect_update(delta, weight)
weights.append(weight)
reducer.privacy_engine._generate_noise = MagicMock(return_value=settings.noise)
reducer.reduce()
return reducer, weights
def _test_weighted_avg_reduction(
self, estimator_type, global_param: float, client_param: float, max_clip_norm
) -> str:
delta = global_param - client_param
global_model = create_ref_model(ref_model_param_value=global_param)
num_params = self._get_num_params(global_model.fl_get_module())
user_norm = math.sqrt(num_params * delta**2)
settings = WeightReducerTestSetting(
noise=np.random.sample(),
clipping_value=max_clip_norm,
max_weight=1,
min_weight=1e-6,
mean_weight=1e-6,
)
reducer, model_updates = self._reduce_weighted_models(
global_model=global_model,
settings=settings,
reduction_type=ReductionType.WEIGHTED_AVERAGE,
estimator_type=estimator_type,
client_param=client_param,
client_weight=10,
global_param=global_param,
)
if max_clip_norm <= user_norm:
expected_param_values = (
delta * (settings.clipping_value / user_norm) + settings.noise
)
else:
expected_param_values = delta + settings.noise
ref_module_after_noise, _ = reducer.current_results
return utils.model_parameters_equal_to_value(
ref_module_after_noise, expected_param_values
)
def test_clipped_models_weighted_sum(self) -> None:
"""
Test when models get clipped with weighted sum
1. Compute the expected per user L2 Norm
2. Set the clipping threshold to be between 1 and user norm
3. Expected model param should be
global = init_global - sum(clipped norms * weights) - noise
"""
global_param = 5
client_param = 1
delta = global_param - client_param
global_model = create_ref_model(ref_model_param_value=global_param)
num_params = self._get_num_params(global_model.fl_get_module())
user_norm = math.sqrt(num_params * delta**2)
settings = WeightReducerTestSetting(
num_clients=10,
clients_per_round=10,
noise=np.random.sample(),
# clipping value is between 1 and user norm
clipping_value=np.random.randint(1, user_norm),
)
reducer, weights = self._reduce_weighted_models(
global_model=global_model,
settings=settings,
reduction_type=ReductionType.WEIGHTED_SUM,
client_param=client_param,
global_param=global_param,
)
clipped_deltas = math.sqrt(settings.clipping_value**2 / num_params)
model_updates = sum((w * clipped_deltas for w in weights))
expected_param_values = model_updates + settings.noise
ref_module_after_noise, _ = reducer.current_results
mismatched = utils.model_parameters_equal_to_value(
ref_module_after_noise, expected_param_values
)
assertEqual(mismatched, "", mismatched)
def test_clipped_models_weighted_avg_with_biased_estimator(self) -> None:
"""
Test when models get clipped with weighted avg with biased estimator
where the sensitivity = (clipping_value * max_weight) / (mean_weight * num_clients_per_round)
1. Compute the expected per user L2 Norm
2. Set the clipping threshold to be between 1 and user norm
3. Expected model param should be
delta = init_global - client
clipped_norm = min(max_clip_norm / user_norm, 1.0)
global = delta * clipped_norms - noise
"""
global_param = 5
client_param = 1
mismatched = self._test_weighted_avg_reduction(
EstimatorType.BIASED, global_param, client_param, max_clip_norm=1
)
assertEqual(mismatched, "", mismatched)
def test_clipped_models_weighted_avg_with_unbiased_estimator(self) -> None:
"""
Test when models get clipped with weighted avg with unbiased estimator
where the sensitivity = (clipping_value * max_weight) / (min_weight * users_per_round)
1. Compute the expected per user L2 Norm
2. Set the clipping threshold to be between 1 and user norm
3. Expected model param should be
delta = init_global - client
clipped_norm = min(max_clip_norm / user_norm, 1.0)
global = delta * clipped_norms - noise
"""
global_param = 5
client_param = 1
mismatched = self._test_weighted_avg_reduction(
EstimatorType.UNBIASED, global_param, client_param, max_clip_norm=1
)
assertEqual(mismatched, "", mismatched)
def test_unclipped_models_weighted_avg_with_biased_estimator(self) -> None:
"""
Test when max_clip_norm is greater than user norm with weighted avg
When the models are unclipped then the expected global model is
delta = init_global - client
global = init_global - delta - noise
"""
global_param = 5
client_param = 1
mismatched = self._test_weighted_avg_reduction(
EstimatorType.BIASED, global_param, client_param, max_clip_norm=100
)
assertEqual(mismatched, "", mismatched)
def test_unclipped_models_weighted_avg_with_unbiased_estimator(self) -> None:
"""
Test when max_clip_norm is greater than user norm with weighted avg
When the models are unclipped then the expected global model is
delta = init_global - client
global = init_global - delta - noise
"""
global_param = 5
client_param = 1
mismatched = self._test_weighted_avg_reduction(
EstimatorType.UNBIASED, global_param, client_param, max_clip_norm=100
)
assertEqual(mismatched, "", mismatched)
def test_unclipped_models_weighted_sum(self) -> None:
"""
Test when max_clip_norm is greater than user norm with weighted sum
When the models are unclipped then the expected global model is
delta = init_global - client
global = init_global - sum(delta * weights) - noise
"""
global_param = np.random.randint(2, 10)
client_param = np.random.randint(1, global_param)
delta = global_param - client_param
global_model = create_ref_model(ref_model_param_value=global_param)
num_params = self._get_num_params(global_model.fl_get_module())
user_norm = math.sqrt(num_params * delta**2)
settings = WeightReducerTestSetting(
num_clients=10,
clients_per_round=10,
noise=np.random.sample(),
# clipping value is greater than user norm
clipping_value=user_norm + 1,
)
reducer, weights = self._reduce_weighted_models(
global_model=global_model,
settings=settings,
reduction_type=ReductionType.WEIGHTED_SUM,
client_param=client_param,
global_param=global_param,
)
model_updates = sum((w * delta for w in weights))
expected_param_values = model_updates + settings.noise
ref_module_after_noise, _ = reducer.current_results
mismatched = utils.model_parameters_equal_to_value(
ref_module_after_noise, expected_param_values
)
assertEqual(mismatched, "", mismatched)
def test_weighted_dp_multiprocess_same(self) -> None:
"""
Multiprocess test for weighted DP reducer
"""
model = utils.SampleNet(utils.TwoFC())
# test 4 processes
r4 = get_dp_round_reducer(
model,
clipping_value=1.0,
reduction_type=ReductionType.WEIGHTED_AVERAGE,
noise_multiplier=1,
num_users_per_round=4,
total_number_of_users=4,
reset=False,
)
results_4 = run_reduction_test(r4, num_processes=4, num_models=4)
same_value = results_4[0]
for r in results_4:
assertAlmostEqual(r, same_value, places=5)
class FLRoundReducerTest:
@pytest.mark.parametrize(
"config, expected_type",
[
(RoundReducerConfig(), RoundReducer),
],
)
def test_reducer_creation_from_config(
self, config: Type, expected_type: Type
) -> None:
ref_model = utils.SampleNet(utils.TwoFC())
reducer = instantiate(config, global_model=ref_model)
assertIsInstance(reducer, expected_type)
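# --- Illustrative sketch (added for exposition, not part of the original test file) ---
# Worked check of the clipping arithmetic used in the DP docstrings above:
# 21 parameters all equal to 2.0 have L2 norm sqrt(21 * 2**2) ~ 9.165; clipping
# that norm to 6.0 rescales every entry to sqrt(36 / 21) ~ 1.309.
if __name__ == "__main__":
    import math

    import torch

    _delta = torch.full((21,), 2.0)
    _clipping_value = 6.0
    _norm = _delta.norm().item()
    _clipped = _delta * min(_clipping_value / _norm, 1.0)
    assert math.isclose(_norm, math.sqrt(84), rel_tol=1e-5)
    assert math.isclose(_clipped[0].item(), math.sqrt(36 / 21), rel_tol=1e-5)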
| canife-main | FLSim/flsim/reducers/tests/test_round_reducer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/reducers/tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Any, Iterable
class IFLDataLoader(abc.ABC):
@abc.abstractmethod
def fl_train_set(self, **kwargs) -> Iterable[Any]:
pass
@abc.abstractmethod
def fl_eval_set(self, **kwargs) -> Iterable[Any]:
pass
@abc.abstractmethod
def fl_test_set(self, **kwargs) -> Iterable[Any]:
pass
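# --- Illustrative sketch (added for exposition, not part of the original module) ---
# A minimal, hypothetical in-memory implementation of the interface above:
# each split is simply a collection of already-prepared user datasets.
class InMemoryDataLoader(IFLDataLoader):
    def __init__(self, train_data, eval_data, test_data) -> None:
        self.train_data = train_data
        self.eval_data = eval_data
        self.test_data = test_data

    def fl_train_set(self, **kwargs) -> Iterable[Any]:
        return iter(self.train_data)

    def fl_eval_set(self, **kwargs) -> Iterable[Any]:
        return iter(self.eval_data)

    def fl_test_set(self, **kwargs) -> Iterable[Any]:
        return iter(self.test_data)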
| canife-main | FLSim/flsim/interfaces/data_loader.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/interfaces/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
from enum import Enum, auto
from typing import Any, Dict, List, Optional, Tuple, Union
from flsim.interfaces.batch_metrics import IFLBatchMetrics
from torch import Tensor
class Metric:
r"""
Wraps a metric.
A reportable metric is simply the name of the metric,
and the value of the metric, in its simplest form.
The value could also be a dict of other metrics, which
in this case the metric is a set of other metrics, and
the `is_compound` attribute is set.
"""
def __init__(self, name: str, value: Union[float, List["Metric"]]):
self.name = name
self.value = value
@property
def is_compund(self):
return isinstance(self.value, list)
def __str__(self):
return f"{self.name}: {self.value}"
@classmethod
def from_dict(cls, d: Dict[str, Any]) -> List["Metric"]:
metrics = []
def process_dict(d, metrics):
for k, v in d.items():
assert isinstance(k, str), f"{k} must be a string"
if isinstance(v, dict):
sub_metric = []
process_dict(v, sub_metric)
                    metrics.append(Metric(k, sub_metric))
else:
assert isinstance(
v, (int, float, Tensor)
), f"{v} is not of types int, float, or torch.Tensor"
metrics.append(Metric(k, float(v)))
process_dict(d, metrics)
return metrics
@classmethod
def from_args(cls, **kwargs):
r"""
Simple util to generate Metrics from kwargs.
The usage is simple, metrics need to be passed as named arguments
to the function. The class will throw if the metrics are not
any of the valid types: int, float, tensor of size 1, or a
        dictionary of such types with string keys. This latter case is considered
        a metric with sub-metrics.
        Example:
            metric = Metric.from_args(a=1, b=2.0, c={"d": 1, "e": 2})
will result in:
[Metric(a, 1.0), Metric(b, 2.0), Metric(c, [Metric(d, 1.0), Metric(e, 2.0)])]
"""
return cls.from_dict(kwargs)
@classmethod
def to_dict(cls, metrics):
d = {}
def process_list(metrics, d):
for metric in metrics:
assert isinstance(metric, Metric)
value = metric.value
if metric.is_compund:
value = {}
process_list(metric.value, value)
d[metric.name] = value
process_list(metrics, d)
return d
class Channel(Enum):
"""Enum that indicates to which "channel" we'd want to log/print our
metrics to. For example, if on chooses `Channel.TENSORBOARD`, a
metrics reporter would display the information on the TensorBoard.
"""
TENSORBOARD = auto()
STDOUT = auto()
class TrainingStage(Enum):
"""Enum that indicates at which stage training is, which will be used when
reporting metrics to certain channels.
"""
TRAINING = auto()
AGGREGATION = auto()
TEST = auto()
EVAL = auto()
PER_CLIENT_EVAL = auto()
PERSONALIZED_EVAL = auto() # personalized eval on eval users
PERSONALIZED_TEST = auto() # personalized eval on test users
class IFLMetricsReporter(abc.ABC):
"""Interface that all PyML FLMetricsReporter should implement. Each user
will have 1 reporter throughout one's entire training. At the beginning of
user’s training (i.e. at the start of epoch for a user), the user starts by
clearing up any aggregation left by calling reset(). After the batch, each
user collects one’s own metrics by calling add_batch_metrics() method. When
all the batches are completed (i.e. at the end of all local epochs for a user),
a global MetricsReporter (i.e. the MetricsReporter responsible of the whole
training aggregates all the data by calling aggregate() method, which gets
a MetricsReporter of an user who just completed one’s own epoch. Then, after
all users’ local epochs are completed, the global MetricsReporter completes
its global aggreation and report its metrics to given channels for that global
epoch.
Note: 1 global epoch consists of several rounds. In each round, we train
a subset of users and each user goes through a number of local epochs, where
each local epoch consists of multiple batches.
"""
@abc.abstractmethod
def add_batch_metrics(self, metrics: IFLBatchMetrics) -> None:
"""Take in output of training for a batch (of each user).
Aggregates metrics (e.g. accuracy, loss, predictions, etc) from batch
into state.
"""
pass
@abc.abstractmethod
def aggregate(self, one_user_metrics: "IFLMetricsReporter"):
"""Combine metrics from one user into a global metrics."""
pass
@abc.abstractmethod
def report_metrics(
self,
reset: bool,
stage: TrainingStage,
extra_metrics: Optional[List[Metric]] = None,
**kwargs,
) -> Tuple[Any, bool]:
"""Report metrics to certain channels such as stdout, file, TensorBoard,
etc. Also, one may want to reset metrics if needed after reporting.
Return value: A tuple with two elements:
1. A metrics object
2. bool: Were the best eval metrics updated?
Current eval metrics are compared to the best eval metrics. If
current eval metrics are better than best eval metrics, true is returned.
Comparing eval metrics to best eval metrics is common in
many ML training algorithms, e.g, early stopping.
"""
pass
# TODO: is this needed? Do we ever call this externally?
@abc.abstractmethod
def reset(self):
"""Clean up all aggregations so far."""
pass
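# --- Illustrative sketch (added for exposition, not part of the original module) ---
# Metric.from_args() flattens keyword arguments into a list of Metric objects
# (nested dicts become compound metrics) and Metric.to_dict() reverses it.
# The metric names below are made up for the example.
if __name__ == "__main__":
    example_metrics = Metric.from_args(
        accuracy=0.9, loss=1.5, timings={"fwd": 2, "bwd": 3}
    )
    assert Metric.to_dict(example_metrics) == {
        "accuracy": 0.9,
        "loss": 1.5,
        "timings": {"fwd": 2.0, "bwd": 3.0},
    }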
| canife-main | FLSim/flsim/interfaces/metrics_reporter.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Any
import torch.nn as nn
from flsim.interfaces.batch_metrics import IFLBatchMetrics
class IFLModel(abc.ABC):
@abc.abstractmethod
def fl_forward(self, batch: Any) -> IFLBatchMetrics:
pass
@abc.abstractmethod
def fl_create_training_batch(self, **kwargs) -> Any:
pass
@abc.abstractmethod
def fl_get_module(self) -> nn.Module:
pass
@abc.abstractmethod
def fl_cuda(self) -> None:
pass
@abc.abstractmethod
def get_eval_metrics(self, batch: Any) -> IFLBatchMetrics:
pass
@abc.abstractmethod
def get_num_examples(self, batch: Any) -> int:
pass
| canife-main | FLSim/flsim/interfaces/model.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
from dataclasses import dataclass
from omegaconf import MISSING
from torch.utils.data import Dataset
class FLDataset(Dataset, abc.ABC):
"""Abstract class that all Dataset for FL will be implementing for
stronger type-checking and better abstraction. (e.g. FLDatasetDataLoader)
"""
pass
@dataclass
class DatasetConfig:
_target_: str = MISSING
| canife-main | FLSim/flsim/interfaces/dataset.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Any, List
import torch
class IFLBatchMetrics(abc.ABC):
"""Each forward run of FL Model (i.e. concrete implementation of IFLModel
in PyML) will return an IFLBatchMetrics object. This is an encapsulation
and abstraction around several useful/common metrics such as loss,
num_examples, predictions, and targets so that IFLMetricsReporter can
aggregate metrics from any kind of model as long as the model returns
IFLBatchMetrics.
"""
@property
@abc.abstractmethod
def loss(self) -> torch.Tensor:
pass
@property
@abc.abstractmethod
def num_examples(self) -> int:
pass
@property
@abc.abstractmethod
def predictions(self) -> List[Any]:
pass
@property
@abc.abstractmethod
def targets(self) -> List[Any]:
pass
@property
@abc.abstractmethod
def model_inputs(self) -> Any:
pass
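# --- Illustrative sketch (added for exposition, not part of the original module) ---
# A minimal, hypothetical concrete implementation that a model's fl_forward()
# could return; it simply stores the values it is given.
class SimpleBatchMetrics(IFLBatchMetrics):
    def __init__(
        self,
        loss: torch.Tensor,
        num_examples: int,
        predictions: List[Any],
        targets: List[Any],
        model_inputs: Any = None,
    ) -> None:
        self._loss = loss
        self._num_examples = num_examples
        self._predictions = predictions
        self._targets = targets
        self._model_inputs = model_inputs

    @property
    def loss(self) -> torch.Tensor:
        return self._loss

    @property
    def num_examples(self) -> int:
        return self._num_examples

    @property
    def predictions(self) -> List[Any]:
        return self._predictions

    @property
    def targets(self) -> List[Any]:
        return self._targets

    @property
    def model_inputs(self) -> Any:
        return self._model_inputs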
| canife-main | FLSim/flsim/interfaces/batch_metrics.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import OrderedDict
from dataclasses import dataclass
import torch
from flsim.channels.base_channel import FLChannelConfig, IdentityChannel, Message
from flsim.utils.config_utils import fullclassname, init_self_cfg
from torch.quantization.observer import MinMaxObserver, PerChannelMinMaxObserver
class ScalarQuantizationChannel(IdentityChannel):
"""
Implements a channel that emulates scalar quantization from 1 to 8
bits per weight (8 bits per weight corresponds to int8 quantization).
We emulate this by successively quantizing and dequantizing. This way,
the rest of the training is transparent for aggregators, reducers,
trainers and so on.
Notes:
- We can perform either per_tensor quantization (same scale and
zero_point for every parameters in a weight matrix) or per_channel
quantization (each channel has its own scale and zero_point). Set
quantize_per_tensor = False to perform per_channel quantization.
- We rely on the very simple MinMax observers for both per_tensor
and per_channel quantization. This can be refined by leveraging the
HistogramObserver for instance.
- We do not quantize the biases for the moment since their compression
overhead is very small.
- We arbitrarily choose to set the int_repr() of a quantized tensor
to [0, 2 ** n_bits - 1]. This is called unsigned quantization.
- All the quantized tensors share the same type, ie `torch.quint8`.
However, when quantizing to less than 8 bits, this is not memory
efficient since each element is stored over 1 byte anyway. Since
we are interested only in emulation for the moment, that's good.
We could also have relied on the fake_quantize primitives but we
prefer to rely on the true quantization operators.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=ScalarQuantizationChannelConfig,
**kwargs,
)
super().__init__(**kwargs)
self.n_bits = self.cfg.n_bits
self.quantize_per_tensor = self.cfg.quantize_per_tensor
assert (
0 < self.n_bits <= 8
), "ScalarQuantizationChannel expects n_bits between 1 and 8 (included)."
self.quant_min = 0
self.quant_max = 2**self.n_bits - 1
self.observer, self.quantizer = self._get_observers_and_quantizers()
def _calc_message_size_client_to_server(self, message: Message):
"""
We compute the size of the compressed message as follows:
- for the weights (compressed): n_bits / 8 bytes per element
- for the biases (not compressed): 4 bytes per element
- for the scales (one for each layer or one for each layer channel
depending on quantize_per_tensor): 8 bytes / element (fp64)
        - for the zero_points (one for each layer or one for each layer channel
depending on quantize_per_tensor): 4 bytes / element (int32)
"""
message_size_bytes = 0
for param in message.model_state_dict.values():
if param.ndim > 1:
# integer representation
message_size_bytes += param.numel() * self.n_bits / 8
# size of scale(s) (fp64) and zero_point(s) (int32)
if self.quantize_per_tensor:
message_size_bytes += ScalarQuantizationChannel.BYTES_PER_FP64
message_size_bytes += ScalarQuantizationChannel.BYTES_PER_FP32
else:
# pyre-ignore[16]: `torch.Tensor` has no attribute `q_per_channel_scales`
n_scales = param.q_per_channel_scales().numel()
# pyre-ignore[16]: `torch.Tensor` has no attribute `q_per_channel_zero_points`
n_zero_points = param.q_per_channel_zero_points().numel()
message_size_bytes += (
ScalarQuantizationChannel.BYTES_PER_FP64 * n_scales
)
message_size_bytes += (
ScalarQuantizationChannel.BYTES_PER_FP32 * n_zero_points
)
else:
message_size_bytes += 4 * param.numel()
return message_size_bytes
def _get_observers_and_quantizers(self):
if self.quantize_per_tensor:
observer = MinMaxObserver(
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
quant_min=self.quant_min,
quant_max=self.quant_max,
reduce_range=False,
)
quantizer = torch.quantize_per_tensor
else:
observer = PerChannelMinMaxObserver(
dtype=torch.quint8,
qscheme=torch.per_channel_affine,
quant_min=self.quant_min,
quant_max=self.quant_max,
reduce_range=False,
ch_axis=0,
)
quantizer = torch.quantize_per_channel
return observer, quantizer
def _quantize(self, x: torch.Tensor) -> torch.Tensor:
# important to reset values, otherwise takes running min and max
self.observer.reset_min_max_vals()
# forward through the observer to get scale(s) and zero_point(s)
_ = self.observer(x)
scale, zero_point = self.observer.calculate_qparams()
# Emulate quantization. Not a no-op since we lose precision when quantizing.
if self.quantize_per_tensor:
xq = self.quantizer(x, float(scale), int(zero_point), dtype=torch.quint8)
else:
scale = scale.to(x.device)
zero_point = zero_point.to(x.device)
xq = self.quantizer(x, scale, zero_point, axis=0, dtype=torch.quint8)
return xq
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _on_client_before_transmission(self, message: Message) -> Message:
"""
We quantize the weights but do not quantize the biases since
the overhead is very small. We copy the state dict since the
tensor format changes.
"""
message.populate_state_dict()
new_state_dict = OrderedDict()
for name, param in message.model_state_dict.items():
if param.ndim > 1:
new_state_dict[name] = self._quantize(param.data)
else:
new_state_dict[name] = param.data
message.model_state_dict = new_state_dict
return message
def _on_server_before_transmission(self, message: Message) -> Message:
message.populate_state_dict()
return message
def _on_server_after_reception(self, message: Message) -> Message:
"""
We dequantize the weights and do not dequantize the biases
since they have not been quantized in the first place. We
copy the state dict since the tensor format changes.
"""
new_state_dict = OrderedDict()
for name, param in message.model_state_dict.items():
if param.ndim > 1:
# pyre-ignore[16]: `torch.Tensor` has no attribute `dequantize`
new_state_dict[name] = param.data.dequantize()
else:
new_state_dict[name] = param.data
message.model_state_dict = new_state_dict
message.update_model_()
return message
@dataclass
class ScalarQuantizationChannelConfig(FLChannelConfig):
_target_: str = fullclassname(ScalarQuantizationChannel)
n_bits: int = 8
quantize_per_tensor: bool = True
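# --- Illustrative sketch (added for exposition, not part of the original module) ---
# The emulation in _quantize() boils down to: observe min/max, compute qparams,
# quantize, then dequantize. A minimal stand-alone version of that round trip
# (the tolerance below is an assumption chosen for this example):
if __name__ == "__main__":
    weight = torch.randn(16, 32)
    observer = MinMaxObserver(
        dtype=torch.quint8,
        qscheme=torch.per_tensor_affine,
        quant_min=0,
        quant_max=255,
    )
    observer(weight)
    scale, zero_point = observer.calculate_qparams()
    weight_q = torch.quantize_per_tensor(
        weight, float(scale), int(zero_point), dtype=torch.quint8
    )
    # Lossy but close round trip: quantization error is bounded by ~scale / 2.
    assert torch.allclose(weight_q.dequantize(), weight, atol=1e-1)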
| canife-main | FLSim/flsim/channels/scalar_quantization_channel.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import OrderedDict
from dataclasses import dataclass
from flsim.channels.base_channel import FLChannelConfig, IdentityChannel, Message
from flsim.utils.config_utils import fullclassname, init_self_cfg
class HalfPrecisionChannel(IdentityChannel):
"""
Implements a channel that emulates upload of model from client
to server using half precision (fp16). We emulate this by
casting successively to half() and back to float(). This way,
the rest of the training is transparent for aggregators, reducers,
trainers and so on.
Note that model.half().float() is *not* in general a no-operation,
since we lose a bit of information when casting to half (and that
is the desired behavior). That's why the corresponding channel_test
is done with a larger `atol` in the direction client->server.
We do not override the method ``_during_transmission_client_to_server``
from IdentityChannel since it can be applied here without modification to
measure the message size. Indeed, it relies on ``element_size()``, which is,
as expected, 2 bytes for half precision and 4 bytes for fp32.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=HalfPrecisionChannelConfig,
**kwargs,
)
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _on_client_before_transmission(self, message: Message) -> Message:
"""
Here we cast all parameters to half precision. We copy the
state dict since the tensor format changes.
"""
message.populate_state_dict()
new_state_dict = OrderedDict()
for name, param in message.model_state_dict.items():
new_state_dict[name] = param.data.half()
message.model_state_dict = new_state_dict
return message
def _on_server_before_transmission(self, message: Message) -> Message:
"""Populate state dict with model before transmission"""
message.populate_state_dict()
return message
def _on_server_after_reception(self, message: Message) -> Message:
"""
We decompress the message by casting back all parameters
to full precision (fp32). Note that this is not a no-op
since there is some loss in precision when casting to half().
We copy the state dict since the tensor format changes.
"""
new_state_dict = OrderedDict()
for name, param in message.model_state_dict.items():
new_state_dict[name] = param.data.float()
message.model_state_dict = new_state_dict
message.update_model_()
return message
@dataclass
class HalfPrecisionChannelConfig(FLChannelConfig):
_target_: str = fullclassname(HalfPrecisionChannel)
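# --- Illustrative sketch (added for exposition, not part of the original module) ---
# Casting to half() and back is not a no-op: a small precision loss remains,
# which is why the corresponding channel test uses a larger atol in the
# client -> server direction. The tolerance below is an assumption for the example.
if __name__ == "__main__":
    import torch

    x = torch.randn(100)
    x_roundtrip = x.half().float()
    assert torch.allclose(x, x_roundtrip, atol=1e-2)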
| canife-main | FLSim/flsim/channels/half_precision_channel.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import math
from collections import OrderedDict
from dataclasses import dataclass
import torch
from flsim.channels.base_channel import FLChannelConfig, IdentityChannel
from flsim.channels.message import Message
from flsim.channels.pq_utils.pq import PQ
from flsim.utils.config_utils import fullclassname, init_self_cfg
class ProductQuantizationChannel(IdentityChannel):
"""
Implements a channel that emulates Product Quantization.
The idea is to split the weight matrices (linear or
convolutional) to a set of subvectors and to learn a
codebook on these subvectors using k-means. More details
on the procedure in the files em.py and pq.py. See paper for
more details: https://arxiv.org/abs/1907.05686.
Notes:
- We do not quantize the biases since their compression
overhead is very small.
- We do not quantize small layers having less than
`min_numel_to_quantize` elements.
- There is the possibility to learn multiple codebooks
per matrix by setting num_codebooks.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=ProductQuantizationChannelConfig,
**kwargs,
)
super().__init__(**kwargs)
self.num_updates = 0
self.seed_centroids = {}
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _compute_entropy(self, assignments, n_bins, eps=1e-5):
counts = torch.histc(assignments.float(), bins=n_bins)
probs = counts / counts.sum()
probs += eps
entropy = -(probs * probs.log()).sum()
return entropy
def _on_server_before_transmission(self, message: Message) -> Message:
message.populate_state_dict()
return message
def _calc_message_size_client_to_server(self, message: Message):
"""
We compute the size of the compressed message as follows:
- for the weights (compressed):
* log(n_centroids) / 8 bytes per element (for the assignment)
* num_codebooks * block_size * n_centroids fp32 elements for the centroids
- for the biases (not compressed): 4 bytes per element
Notes:
- n_centroids is not necessarily equal to max_num_centroids, hence we
recover it from the shape of `param["centroids"]`.
"""
message_size_bytes = 0
for param in message.model_state_dict.values():
# param was compressed with PQ
if type(param) is dict:
block_size = param["centroids"].size(1)
n_subvectors = param["assignments"].size(0)
n_centroids = param["centroids"].size(0) // self.cfg.num_codebooks
assignments_bytes = math.log2(n_centroids) / 8.0 * n_subvectors
centroids_bytes = (
self.cfg.num_codebooks
* n_centroids
* block_size
* ProductQuantizationChannel.BYTES_PER_FP16
)
message_size_bytes += assignments_bytes + centroids_bytes
# param is a non-compressed torch.Tensor
else:
message_size_bytes += (
ProductQuantizationChannel.BYTES_PER_FP32 * param.numel()
)
return message_size_bytes
def _on_client_before_transmission(self, message: Message) -> Message:
"""
We quantize the weights under the form of centroids
and assignments and do not quantize the biases.
"""
message.populate_state_dict()
self.num_updates += 1
# pyre-fixme[16]: `ProductQuantizationChannel` has no attribute `cfg`.
if self.num_updates > self.cfg.num_warmup_updates:
new_state_dict = OrderedDict()
for name, param in message.model_state_dict.items():
# compress only large weight matrices
if param.ndim > 1 and param.numel() >= self.cfg.min_numel_to_quantize:
pq = PQ(
param.data.size(),
self.cfg.max_block_size,
self.cfg.num_codebooks,
self.cfg.max_num_centroids,
self.cfg.num_k_means_iter,
self.cfg.verbose,
)
layer_seed_centroids = self.seed_centroids.get(name)
centroids, assignments = pq.encode(
param.data.cpu(), seed_centroids=layer_seed_centroids
)
# n_bins = centroids.size(0)
# print(f"Relative Entropy: {100 * self._compute_entropy(assignments, n_bins).item() / math.log(n_bins):.0f}%")
compressed_param = {
"sizes": pq.sizes,
"centroids": centroids.data,
"assignments": assignments.data,
}
new_state_dict[name] = compressed_param
# do not compress biases and small layers
else:
new_state_dict[name] = param.data
message.model_state_dict = new_state_dict
return message
def _on_server_after_reception(self, message: Message) -> Message:
"""
We reconstruct the weights from the centroids
and the assignments.
"""
new_state_dict = OrderedDict()
for name, param in message.model_state_dict.items():
# param was compressed with PQ. TODO: more robust check than `type(param)`
if type(param) is dict:
pq = PQ(
param["sizes"],
self.cfg.max_block_size,
self.cfg.num_codebooks,
self.cfg.max_num_centroids,
self.cfg.num_k_means_iter,
self.cfg.verbose,
)
decompressed_param = pq.decode(
param["centroids"].data, param["assignments"].data
)
new_state_dict[name] = decompressed_param
# param is a non-compressed torch.Tensor
else:
new_state_dict[name] = param.data
message.model_state_dict = new_state_dict
message.update_model_()
return message
@dataclass
class ProductQuantizationChannelConfig(FLChannelConfig):
_target_: str = fullclassname(ProductQuantizationChannel)
max_num_centroids: int = 256
min_numel_to_quantize: int = 10
num_codebooks: int = 1
max_block_size: int = 9
num_k_means_iter: int = 20
verbose: bool = False
num_warmup_updates: int = 0
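# --- Illustrative sketch (added for exposition, not part of the original module) ---
# Worked example of the compressed-size accounting in
# _calc_message_size_client_to_server, using made-up layer dimensions:
# a 128 x 64 weight split into blocks of size 8, one codebook, 256 centroids.
if __name__ == "__main__":
    n_centroids, block_size, num_codebooks = 256, 8, 1
    n_subvectors = 128 * 64 // block_size  # 1024 subvectors of length 8
    assignments_bytes = math.log2(n_centroids) / 8.0 * n_subvectors  # 1 byte each
    centroids_bytes = (
        num_codebooks
        * n_centroids
        * block_size
        * ProductQuantizationChannel.BYTES_PER_FP16
    )
    assert assignments_bytes + centroids_bytes == 1024 + 4096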
| canife-main | FLSim/flsim/channels/product_quantization_channel.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore
from .base_channel import FLChannelConfig
from .half_precision_channel import HalfPrecisionChannelConfig
from .product_quantization_channel import ProductQuantizationChannelConfig
from .scalar_quantization_channel import ScalarQuantizationChannelConfig
from .sparse_mask_channel import SparseMaskChannelConfig
ConfigStore.instance().store(
name="base_identity_channel",
node=FLChannelConfig,
group="channel",
)
ConfigStore.instance().store(
name="base_sparse_mask_channel",
node=SparseMaskChannelConfig,
group="channel",
)
ConfigStore.instance().store(
name="base_half_precision_channel",
node=HalfPrecisionChannelConfig,
group="channel",
)
ConfigStore.instance().store(
name="base_scalar_quantization_channel",
node=ScalarQuantizationChannelConfig,
group="channel",
)
ConfigStore.instance().store(
name="base_product_quantization_channel",
node=ProductQuantizationChannelConfig,
group="channel",
)
| canife-main | FLSim/flsim/channels/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Optional, OrderedDict
import torch.nn as nn
from flsim.interfaces.model import IFLModel
from flsim.utils.count_sketch import CountSketch
from torch import Tensor
@dataclass
class Message:
"""
Generic message dataclass, composed of:
- model: a model containing information that will be sent
- meta: any meta information about a client or a server
for instance.
    This dataclass can be extended to your custom needs; see the
    ``Message`` example.
Notes:
- The packet may change after transmission through a channel. For
instance, the SketchChannel takes as input a model state dict
and outputs a CountSketch (but no model state dict).
"""
# model
model: IFLModel = field(default_factory=nn.Module)
# add any meta information here
weight: float = field(default_factory=float)
# here we store state dict for convenience
model_state_dict: OrderedDict[str, Tensor] = field(
default_factory=OrderedDict[str, Tensor], init=True
)
# count sketch
count_sketch: Optional[CountSketch] = field(default=None)
def populate_state_dict(self, **kwargs):
"""
We copy the model's state dict and add it as an attribute to the message.
Notes:
- We deepcopy the state dict to avoid side effects in case we manipulate
the state dict in place.
- We rely on a model's state dict as it will be easier to change the
type of the underlying tensors (say int8) versus replacing every
nn.Module with its corresponding counterpart.
"""
self.model_state_dict = deepcopy(self.model.fl_get_module().state_dict())
def update_model_(self):
"""
Updates model with the state dict stored in the message. May be useful
when receiving a `Message` and wanting to update the local model.
"""
assert (
self.model_state_dict
), "Message state dict is empty. Please check if message.state_dict is populated."
self.model.fl_get_module().load_state_dict(self.model_state_dict)
| canife-main | FLSim/flsim/channels/message.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
from dataclasses import dataclass
from typing import OrderedDict
from flsim.channels.communication_stats import ChannelStatsCollector
from flsim.channels.message import Message
from flsim.utils.config_utils import fullclassname, init_self_cfg
class IFLChannel(abc.ABC):
"""
Base interface for `IFLChannel` that takes care of transmitting messages
between clients and the server and collecting some metrics on the way.
This is by nature a *bi-directional* channel (server to client and
client to server).
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=FLChannelConfig,
**kwargs,
)
self.stats_collector = (
ChannelStatsCollector() if self.cfg.report_communication_metrics else None
)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@abc.abstractmethod
def server_to_client(self, message: Message) -> Message:
"""
Simulates the manipulation and transmission of a `Message` from
the server to a client. Also handles relevant stats accounting.
"""
pass
@abc.abstractmethod
def client_to_server(self, message: Message) -> Message:
"""
Simulates the manipulation and transmission of a `Message` from
a client to the server. Also handles relevant stats accounting.
"""
pass
class IdentityChannel(IFLChannel):
"""
Implements a *bi-directional* channel which is pass-through: the message sent
is identical to the message received when sending from the server to a
client and when sending from a client to the server.
It simulates what happens in the real world. For instance, with the
direction client to server, there are three methods:
- ``_on_client_before_transmission``: this is what happens in the client
right before sending the message, such as compression.
- ``_during_transmission_client_to_server``: this is what happens during the
transmission of the message, such as communication measurements,
communication noise.
- ``_on_server_after_reception``: this is what happens first when the server
receives the message, such as decompression.
Notes:
- We recommend that all channels inherit `IdentityChannel` and override
only the necessary methods.
- Each new channel is responsible for the measurement of its message size,
i.e. it is the user's responsibility to override the right methods.
"""
# defining useful general constants for channel size measurements
BYTES_PER_FP16 = 2
BYTES_PER_FP32 = 4
BYTES_PER_FP64 = 8
BYTES_PER_INT64 = 8
BYTES_PER_BIT = 0.125
def __init__(self, **kwargs):
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@classmethod
def calc_model_size_float_point(cls, state_dict: OrderedDict):
"""
Calculates model size in bytes given a state dict.
"""
model_size_bytes = sum(
p.numel() * p.element_size() for (_, p) in state_dict.items()
)
return model_size_bytes
def _calc_message_size_client_to_server(self, message: Message):
return self.calc_model_size_float_point(message.model_state_dict)
def _calc_message_size_server_to_client(self, message: Message):
return self.calc_model_size_float_point(message.model_state_dict)
def _on_client_before_transmission(self, message: Message) -> Message:
"""
Implements message manipulation that would be done on a client in the real world
just before transmitting a message to the server.
"""
return message
def _during_transmission_client_to_server(self, message: Message) -> Message:
"""
Manipulation to the message in transit from client to server.
"""
if self.stats_collector:
message_size_bytes = self._calc_message_size_client_to_server(message)
self.stats_collector.collect_channel_stats(
message_size_bytes, client_to_server=True
)
return message
def _on_server_after_reception(self, message: Message) -> Message:
"""
Implements message manipulation that would be done on the server in the real world
just after receiving a message from a client.
"""
return message
def _on_server_before_transmission(self, message: Message) -> Message:
"""
Implements message manipulation that would be done on the server in the real world
just before transmitting a message to a client.
"""
return message
def _during_transmission_server_to_client(self, message: Message) -> Message:
"""
Manipulation to the message in transit from server to client.
"""
if self.stats_collector:
message_size_bytes = self._calc_message_size_server_to_client(message)
self.stats_collector.collect_channel_stats(
message_size_bytes, client_to_server=False
)
return message
def _on_client_after_reception(self, message: Message) -> Message:
"""
Implements message manipulation that would be done on a client in the real world
just after receiving a message from the server.
"""
return message
def client_to_server(self, message: Message) -> Message:
"""
Performs three successive steps to send a message from a client to the server.
"""
# process through channel
message = self._on_client_before_transmission(message)
message = self._during_transmission_client_to_server(message)
message = self._on_server_after_reception(message)
return message
def server_to_client(self, message: Message) -> Message:
"""
Performs three successive steps to send a message from the server to a client.
"""
# process through channel
message = self._on_server_before_transmission(message)
message = self._during_transmission_server_to_client(message)
message = self._on_client_after_reception(message)
return message
@dataclass
class FLChannelConfig:
"""
Base Config for a channel defining channel properties
such as channel drop rate, quantization effects,
channel bandwidth settings, etc.
"""
# Add attributes as needed.
_target_: str = fullclassname(IdentityChannel)
_recursive_: bool = False
# Whether communication metrics (between server and clients) should be
# reported
report_communication_metrics: bool = False
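# --- Illustrative sketch (added for exposition, not part of the original module) ---
# A custom channel typically inherits IdentityChannel and overrides only the
# hooks it needs. This hypothetical channel zeroes out biases on upload; the
# "1-D tensors are biases" heuristic is an assumption made for the example.
class ZeroBiasChannel(IdentityChannel):
    def _on_client_before_transmission(self, message: Message) -> Message:
        message.populate_state_dict()
        for param in message.model_state_dict.values():
            if param.ndim == 1:
                param.data.zero_()
        return message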
| canife-main | FLSim/flsim/channels/base_channel.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum, auto
from flsim.utils.fl.stats import RandomVariableStatsTracker
class ChannelDirection(Enum):
CLIENT_TO_SERVER = auto()
SERVER_TO_CLIENT = auto()
class ChannelStatsCollector:
def __init__(self):
self.reset_channel_stats()
def reset_channel_stats(self):
self.communication_stats = {
ChannelDirection.CLIENT_TO_SERVER: RandomVariableStatsTracker(),
ChannelDirection.SERVER_TO_CLIENT: RandomVariableStatsTracker(),
}
def get_channel_stats(self):
return self.communication_stats
def collect_channel_stats(
self, message_size_bytes: float, client_to_server: bool = True
):
"""
Collect statistics about the updates/model transmitted both
for client to server and server to client directions.
"""
direction = (
ChannelDirection.CLIENT_TO_SERVER
if client_to_server
else ChannelDirection.SERVER_TO_CLIENT
)
self.communication_stats[direction].update(message_size_bytes)
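# --- Illustrative sketch (added for exposition, not part of the original module) ---
# The collector keeps one RandomVariableStatsTracker per direction; message
# sizes are routed to the matching tracker by the client_to_server flag.
if __name__ == "__main__":
    stats = ChannelStatsCollector()
    stats.collect_channel_stats(1000.0, client_to_server=True)
    stats.collect_channel_stats(3000.0, client_to_server=False)
    per_direction = stats.get_channel_stats()
    assert set(per_direction) == {
        ChannelDirection.CLIENT_TO_SERVER,
        ChannelDirection.SERVER_TO_CLIENT,
    }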
| canife-main | FLSim/flsim/channels/communication_stats.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import OrderedDict
from dataclasses import dataclass
import torch
from flsim.channels.base_channel import FLChannelConfig, IdentityChannel, Message
from flsim.utils.config_utils import fullclassname, init_self_cfg
class SparseMaskChannel(IdentityChannel):
"""
Implements a channel where the message sent from client to server is
masked by the top-k absolute values in the model parameter weights.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=SparseMaskChannelConfig,
**kwargs,
)
super().__init__()
self.proportion_of_zero_weights = self.cfg.proportion_of_zero_weights
self.sparsity_method = self.cfg.sparsity_method
self.compressed_size_measurement = self.cfg.compressed_size_measurement
assert self.cfg.sparsity_method in {
"random",
"topk",
}, "Compression method must be one of 'random' or 'topk'"
assert (
0 <= self.cfg.proportion_of_zero_weights < 1
), "Compression rate must be in [0, 1)"
assert self.cfg.compressed_size_measurement in {
"coo",
"bitmask",
}, "Compressed size measurement must be one of 'coo' or 'bitmask'"
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _calc_message_size_client_to_server(self, message: Message):
"""
        For COO format: we compute the size of the compressed message as follows: for any
        parameter, we count the number of non-zero entries. Then, we assume
        that the sparse tensor is stored in the COO format: each non-zero
        entry is stored with a value (in fp32) and an index (int64 * ndim).
        For bitmask format: the non-zero entries are transmitted as is in fp32 format,
        and the sparsity mask is transmitted as a stream of bits marking the sparse locations.
"""
message_size_bytes = 0
for param in message.model_state_dict.values():
# get number of non-sparse entries (nse)
nse = param.numel() - int(self.proportion_of_zero_weights * param.numel())
# size of the index
if self.compressed_size_measurement == "coo":
message_size_bytes += (
param.ndim * SparseMaskChannel.BYTES_PER_INT64 * nse
)
elif self.compressed_size_measurement == "bitmask":
message_size_bytes += param.numel() * SparseMaskChannel.BYTES_PER_BIT
# size of the values
message_size_bytes += nse * SparseMaskChannel.BYTES_PER_FP32
return message_size_bytes
def _on_server_before_transmission(self, message: Message) -> Message:
message.populate_state_dict()
return message
def _on_server_after_reception(self, message: Message) -> Message:
message.update_model_()
return message
def _on_client_before_transmission(self, message: Message) -> Message:
"""
Here we apply a sparse mask to the parameter updates before sending the message.
Notes:
- There are two options for sparsity: random and topk
            - In random sparsity, the mask randomly selects which parameter weight updates to prune
- In TopK sparsity, sparsity is applied on each parameter's weight update
separately depending on the magnitude of the values; the smallest values
get pruned.
- The message is pruned so that the number of non-sparse entries is
deterministic and constant across runs for a given weight matrix.
"""
message.populate_state_dict()
new_state_dict = OrderedDict()
for name, param in message.model_state_dict.items():
# exact number of elements to prune
num_params_to_prune = int(self.proportion_of_zero_weights * param.numel())
# select flat indices to prune
top_k = torch.topk(
(
torch.rand_like(param.data)
if self.sparsity_method == "random"
else torch.abs(param.data)
).view(-1),
k=num_params_to_prune,
largest=False,
)
# prune top-K
param.data.view(-1)[top_k.indices] = 0
new_state_dict[name] = param.data
message.model_state_dict = new_state_dict
return message
@dataclass
class SparseMaskChannelConfig(FLChannelConfig):
_target_: str = fullclassname(SparseMaskChannel)
proportion_of_zero_weights: float = 0.5
sparsity_method: str = "random"
compressed_size_measurement: str = "bitmask"
| canife-main | FLSim/flsim/channels/sparse_mask_channel.py |
| canife-main | FLSim/flsim/channels/pq_utils/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from sklearn.cluster import KMeans
class PQ:
"""
Quantizes the layer weights W with the standard Product Quantization
technique. This learns n_dict codebooks of codewords of size
block_size from W.
For further reference on using PQ to quantize neural networks, see
"And the Bit Goes Down: Revisiting the Quantization of Neural Networks",
ICLR 2020.
PQ is performed in two steps:
(1) The matrix W (weights or fully-connected or convolutional layer)
is reshaped to (block_size, -1).
- If W is fully-connected (2D), its rows are split into
blocks of size block_size.
- If W is convolutional (4D), its filters are split along the
spatial dimension.
(2) We apply the standard EM/k-means algorithm to the resulting reshaped matrix.
Once W is reshaped to (block_size, n_samples), we learn num_codebooks codebooks
each of size n_samples // num_codebooks (except the last which may have a variable
    size). More specifically, the first n_samples // num_codebooks samples belong to the first codebook,
and so on. We use a trick to recover the quantized matrix from the knowledge of
its centroids and assignments: we shift the assignments by a factor n_centroids
every dict. See the decode() function.
Args:
- sizes: sizes of the weight matrix to quantize
- max_block_size: max allowed block size (for the subvectors)
- num_codebooks: number of dicts
- max_num_centroids: max allowed number of centroids
- num_k_means_iter: number of k-means iterations
- verbose: print information after each iteration
Notes:
- PQ works for tensors that are on the CPU or on the GPU.
- We need the original size of the weight matrix to decode, that's why
we include it in the class state.
- We compute internally the actual block_size in _determine_block_size.
The actual block size is defined as the largest block size that is
compatible with the shape of W while being less or equal than max_block_size.
- We compute internally the actual number of centroids in _determine_num_centroids
          to avoid quantizing small layers with too many centroids.
"""
def __init__(
self,
sizes: torch.Size,
max_block_size: int = 9,
num_codebooks: int = 1,
max_num_centroids: int = 256,
num_k_means_iter: int = 20,
verbose: bool = False,
set_random_state: bool = False,
):
self.sizes = sizes
self.ndim = len(sizes)
self.num_codebooks = num_codebooks
self.num_k_means_iter = num_k_means_iter
self.verbose = verbose
self.set_random_state = set_random_state
self.block_size = self._determine_block_size(max_block_size)
self.n_centroids = self._determine_num_centroids(max_num_centroids)
def _determine_block_size(self, max_block_size):
"""
Return the largest block size that is compatible with
the shape of W while being less than or equal to max_block_size.
"""
if self.ndim == 2:
_out_features, in_features = self.sizes
allowed_block_sizes = filter(
lambda block_size: in_features % block_size == 0,
range(1, max_block_size + 1),
)
block_size = list(allowed_block_sizes)[-1]
elif self.ndim == 3:
_out_channels, in_channels, k = self.sizes
allowed_block_sizes = filter(
lambda block_size: (in_channels * k) % block_size == 0,
range(1, max_block_size + 1),
)
block_size = list(allowed_block_sizes)[-1]
elif self.ndim == 4:
_out_channels, in_channels, kh, kw = self.sizes
allowed_block_sizes = filter(
lambda block_size: (in_channels * kh * kw) % block_size == 0,
range(1, max_block_size + 1),
)
block_size = list(allowed_block_sizes)[-1]
else:
raise NotImplementedError(self.sizes)
if self.verbose:
print(f"Selected block size {block_size} for W of shape {self.sizes}")
return block_size
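# Worked example (added comment): for sizes = (64, 30) and max_block_size = 8,
# the divisors of 30 that are <= 8 are {1, 2, 3, 5, 6}, so block_size = 6.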
def _determine_num_centroids(self, max_num_centroids, max_centroid_factor_bound=4):
"""
W is split into n_subvectors per dict. Returns n_centroids such that:
- n_centroids is a power of two (greater than or equal to 2)
- n_centroids <= max_num_centroids
- n_centroids * max_centroid_factor_bound < n_subvectors
Notes:
- This is to avoid quantizing small layers with too many centroids.
- Must be called after determining self.block_size.
"""
n_tot_subvectors = math.prod(self.sizes) // self.block_size
n_subvectors = n_tot_subvectors // self.num_codebooks
assert n_subvectors >= 8, "Not enough subvectors, consider not quantizing."
n_centroids = 2 ** int(math.log2(n_subvectors // max_centroid_factor_bound))
n_centroids = min(max_num_centroids, n_centroids)
if self.verbose:
print(f"Selected n_centroids {n_centroids} for W of shape {self.sizes}")
return n_centroids
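# Worked example (added comment): with sizes = (64, 30), block_size = 6 and
# num_codebooks = 1, n_subvectors = (64 * 30) // 6 = 320, so
# n_centroids = 2 ** int(log2(320 // 4)) = 2 ** 6 = 64, below the 256 cap.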
def _reshape_and_split(self, W) -> torch.Tensor:
"""
Reshapes the matrix W as explained in step (1).
"""
# fully connected: by convention the weight has size out_features x in_features
if self.ndim == 2:
out_features, in_features = self.sizes
assert (
in_features % self.block_size == 0
), "Linear: in_features must be a multiple of block_size"
W_unsplit = (
W.reshape(out_features, -1, self.block_size)
.permute(2, 1, 0)
.flatten(1, 2)
)
# convolutional: we reshape along the spatial dimension
elif self.ndim == 3:
out_channels, in_channels, k = self.sizes
assert (
in_channels * k
) % self.block_size == 0, (
"Conv: kernel_size kh * kw must be a multiple of block_size"
)
W_unsplit = (
W.reshape(out_channels, -1, self.block_size)
.permute(2, 0, 1)
.flatten(1, 2)
)
# convolutional: we reshape along the spatial dimension
elif self.ndim == 4:
out_channels, in_channels, kh, kw = self.sizes
assert (
in_channels * kh * kw
) % self.block_size == 0, (
"Conv: kernel_size kh * kw must be a multiple of block_size"
)
W_unsplit = (
W.reshape(out_channels, -1, self.block_size)
.permute(2, 0, 1)
.flatten(1, 2)
)
# not implemented
else:
raise NotImplementedError(self.sizes)
# split into self.num_codebooks blocks (last block may be larger)
split = W_unsplit.size(1) // self.num_codebooks
last_split = W_unsplit.size(1) - split * (self.num_codebooks - 1)
splits = [split] * (self.num_codebooks - 1) + [last_split]
return torch.split(W_unsplit, splits, dim=1)
def _offset_assignments(self, assignments: torch.Tensor) -> torch.Tensor:
"""
See ``decode`` for an explanation and illustration.
"""
n_assignments = len(assignments)
subvectors_per_dict = int(math.ceil(n_assignments / self.num_codebooks))
offset = torch.arange(
0, self.num_codebooks * self.n_centroids, self.n_centroids
)
offset = offset.type_as(assignments)
offset = offset.repeat_interleave(subvectors_per_dict)[:n_assignments]
return assignments + offset
def encode(self, W, seed_assignments=None, seed_centroids=None):
"""
Performs num_k_means_iter EM steps as explained in step (2).
"""
# reshape and split W as explained in step (1).
W_reshaped = self._reshape_and_split(W)
# compute centroids for all dicts
all_centroids = []
all_assignments = []
for d in range(self.num_codebooks):
if self.verbose:
print(
f"Building dict {d+1}/{self.num_codebooks} with {self.n_centroids} "
f"centroids for {W_reshaped[d].size(1)} vectors "
f"{'without' if seed_centroids is None else 'with'} seed centroids"
)
# current weight
W_curr = W_reshaped[d]
# run k-means
random_state = 0 if self.set_random_state else None
kmeans = KMeans(
n_clusters=self.n_centroids,
init="random",
n_init=1,
max_iter=self.num_k_means_iter,
tol=0.0001,
verbose=self.verbose,
random_state=random_state,
)
# generic case
if seed_assignments is None and seed_centroids is None:
assignments = kmeans.fit_predict(W_curr.t())
centroids = kmeans.cluster_centers_
# shared centroids
elif seed_centroids is not None:
nc = self.n_centroids
centroids = seed_centroids[nc * d : nc * (d + 1)].cpu()
kmeans._n_threads = 16
kmeans.cluster_centers_ = centroids.double().numpy()
assignments = kmeans.predict(W_curr.t())
# shared assignments, two things to note:
# (1) the assignments are offset when using multiple codebooks
# (2) the largest chunk of W may be larger than the previous ones
else:
offset = d * W_reshaped[0].size(1)
length = W_reshaped[d].size(1)
assignments = seed_assignments[offset : offset + length].cpu()
assignments -= d * self.n_centroids
centroids = W_curr.new_zeros((self.n_centroids, self.block_size))
for c in range(self.n_centroids):
centroids[c] = W_curr.t()[assignments == c].mean()
assignments = torch.LongTensor(assignments)
centroids = torch.Tensor(centroids)
# remember centroids and assignments
all_centroids.append(centroids)
all_assignments.append(assignments)
# cat centroids and assignments
assignments = torch.cat(all_assignments)
assignments = self._offset_assignments(assignments)
centroids = torch.cat(all_centroids)
return centroids, assignments
def decode(
self,
centroids: torch.Tensor,
assignments: torch.Tensor,
) -> torch.Tensor:
"""
Returns the reconstructed (de-quantized) full weight matrix. Must be called after
the encode function.
We offset assignments, let us illustrate this on an example.
Say num_codebooks = 2 with 3 centroids per dict, and assume that
assignments = [1, 2, 3, 3, 1, 1, 3, 2]. Then, after the offset
the assignments would be [1, 2, 3, 3, 4, 4, 6, 5].
Thus, we can call centroids[assignments] to properly recover W.
Args:
- centroids has size (num_codebooks x n_centroids, block_size)
- assignments has size (n_samples)
"""
# decode in the fully connected case
if self.ndim == 2:
out_features, _ = self.sizes
return (
centroids[assignments]
.reshape(-1, out_features, self.block_size)
.permute(1, 0, 2)
.flatten(1, 2)
)
# decode in the convolutional case
elif self.ndim == 3:
out_channels, in_channels, k = self.sizes
return (
centroids[assignments]
.reshape(-1, in_channels, self.block_size)
.permute(0, 1, 2)
.reshape(out_channels, in_channels, k)
)
# decode in the convolutional case
elif self.ndim == 4:
out_channels, in_channels, kh, kw = self.sizes
return (
centroids[assignments]
.reshape(out_channels, -1, self.block_size)
.permute(0, 1, 2)
.reshape(out_channels, in_channels, kh, kw)
)
# not implemented
else:
raise NotImplementedError(
f"Only supports 2D convolutions and linear layers, but got size {self.sizes}"
)
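# --- Hedged usage sketch (added; not part of the original file) ---
# A minimal round trip with the PQ class above: quantize a small fully-connected
# weight matrix and reconstruct it. The shapes below are made up and chosen only
# so that the internal "n_subvectors >= 8" check passes.
if __name__ == "__main__":
    W = torch.randn(16, 8)  # (out_features, in_features)
    pq = PQ(sizes=W.size(), max_block_size=4, num_k_means_iter=10, set_random_state=True)
    centroids, assignments = pq.encode(W)
    W_hat = pq.decode(centroids, assignments)
    print("shape:", tuple(W_hat.shape), "reconstruction error:", (W - W_hat).norm().item())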
| canife-main | FLSim/flsim/channels/pq_utils/pq.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Type
import pytest
from flsim.channels.base_channel import FLChannelConfig, IdentityChannel
from flsim.channels.communication_stats import ChannelDirection
from flsim.channels.message import Message
from flsim.common.pytest_helper import assertEqual, assertIsInstance
from flsim.utils import test_utils as utils
from flsim.utils.fl.common import FLModelParamUtils
from hydra.utils import instantiate
class TestIdentityChannel:
@pytest.mark.parametrize(
"config",
[FLChannelConfig()],
)
@pytest.mark.parametrize(
"expected_type",
[IdentityChannel],
)
def test_identity_instantiation(self, config: Type, expected_type: Type) -> None:
"""
Tests instantiation of the identity channel.
"""
# test instantiation
channel = instantiate(config)
assertIsInstance(channel, expected_type)
@pytest.mark.parametrize(
"config",
[FLChannelConfig()],
)
@pytest.mark.parametrize(
"expected_type",
[IdentityChannel],
)
def test_identity_server_to_client(self, config: Type, expected_type: Type) -> None:
"""
Tests server to client transmission of the message. Models
before and after transmission should be identical.
"""
# instantiate channel
channel = instantiate(config)
# create dummy model
two_fc = utils.TwoFC()
base_model = utils.SampleNet(two_fc)
download_model = FLModelParamUtils.clone(base_model)
# test server -> client, models should be strictly identical
message = Message(download_model)
message = channel.server_to_client(message)
mismatched = FLModelParamUtils.get_mismatched_param(
[base_model.fl_get_module(), download_model.fl_get_module()]
)
assertEqual(mismatched, "", mismatched)
@pytest.mark.parametrize(
"config",
[FLChannelConfig()],
)
@pytest.mark.parametrize(
"expected_type",
[IdentityChannel],
)
def test_identity_client_to_server(self, config: Type, expected_type: Type) -> None:
"""
Tests client to server transmission of the message. Models
before and after transmission should be identical.
"""
# instantiation
channel = instantiate(config)
# create dummy model
two_fc = utils.TwoFC()
base_model = utils.SampleNet(two_fc)
upload_model = FLModelParamUtils.clone(base_model)
# test client -> server, models should be strictly identical
message = Message(upload_model)
message = channel.client_to_server(message)
mismatched = FLModelParamUtils.get_mismatched_param(
[base_model.fl_get_module(), upload_model.fl_get_module()]
)
assertEqual(mismatched, "", mismatched)
@pytest.mark.parametrize(
"config",
[FLChannelConfig(report_communication_metrics=True)],
)
@pytest.mark.parametrize(
"expected_type",
[IdentityChannel],
)
def test_identity_stats(self, config: Type, expected_type: Type) -> None:
"""
Tests stats measurement. Bytes sent from client to server
and from server to client should be identical.
"""
# instantiation
channel = instantiate(config)
# create dummy model
two_fc = utils.TwoFC()
download_model = utils.SampleNet(FLModelParamUtils.clone(two_fc))
upload_model = utils.SampleNet(FLModelParamUtils.clone(two_fc))
# server -> client
message = Message(download_model)
message = channel.server_to_client(message)
# client -> server
message = Message(upload_model)
message = channel.client_to_server(message)
# test communication stats measurements
stats = channel.stats_collector.get_channel_stats()
client_to_server_bytes = stats[ChannelDirection.CLIENT_TO_SERVER].mean()
server_to_client_bytes = stats[ChannelDirection.SERVER_TO_CLIENT].mean()
assertEqual(client_to_server_bytes, server_to_client_bytes)
| canife-main | FLSim/flsim/channels/tests/test_identity_channel.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Type
import pytest
from flsim.channels.communication_stats import ChannelDirection
from flsim.channels.message import Message
from flsim.channels.scalar_quantization_channel import (
ScalarQuantizationChannel,
ScalarQuantizationChannelConfig,
)
from flsim.common.pytest_helper import assertEqual, assertIsInstance
from flsim.utils import test_utils as utils
from flsim.utils.fl.common import FLModelParamUtils
from hydra.utils import instantiate
class TestScalarQuantizationChannel:
@pytest.mark.parametrize(
"config",
[
ScalarQuantizationChannelConfig(
n_bits=8,
quantize_per_tensor=True,
),
],
)
@pytest.mark.parametrize(
"expected_type",
[ScalarQuantizationChannel],
)
def test_int8_instantiation(self, config: Type, expected_type: Type) -> None:
"""
Tests instantiation of the scalar quantization channel.
"""
# test instantiation
channel = instantiate(config)
assertIsInstance(channel, expected_type)
@pytest.mark.parametrize(
"config",
[
ScalarQuantizationChannelConfig(n_bits=8, quantize_per_tensor=True),
],
)
@pytest.mark.parametrize(
"expected_type",
[ScalarQuantizationChannel],
)
def test_int8_server_to_client(self, config: Type, expected_type: Type) -> None:
"""
Tests server to client transmission of the message. Models
before and after transmission should be identical since we
only scalar quantize in the client to server direction.
"""
# instantiation
channel = instantiate(config)
# create dummy model
two_fc = utils.TwoFC()
base_model = utils.SampleNet(two_fc)
download_model = FLModelParamUtils.clone(base_model)
# test server -> client, models should be strictly identical
message = Message(download_model)
message = channel.server_to_client(message)
mismatched = FLModelParamUtils.get_mismatched_param(
[base_model.fl_get_module(), download_model.fl_get_module()]
)
assertEqual(mismatched, "", mismatched)
@pytest.mark.parametrize(
"config",
[
ScalarQuantizationChannelConfig(n_bits=8, quantize_per_tensor=True),
],
)
@pytest.mark.parametrize(
"expected_type",
[ScalarQuantizationChannel],
)
def test_int8_client_to_server(self, config: Type, expected_type: Type) -> None:
"""
Tests client to server transmission of the message. Models
before and after transmission should be almost identical
due to int8 emulation (n_bits = 8 here).
"""
# instantiation
channel = instantiate(config)
assertIsInstance(channel, expected_type)
# create dummy model
two_fc = utils.TwoFC()
base_model = utils.SampleNet(two_fc)
upload_model = FLModelParamUtils.clone(base_model)
# test client -> server, models should be almost equal due to int8 emulation
message = Message(upload_model)
message = channel.client_to_server(message)
mismatched = FLModelParamUtils.get_mismatched_param(
[base_model.fl_get_module(), upload_model.fl_get_module()],
rel_epsilon=1e-8,
abs_epsilon=1e-2,
)
assertEqual(mismatched, "", mismatched)
@pytest.mark.parametrize(
"config",
[
ScalarQuantizationChannelConfig(
n_bits=8, quantize_per_tensor=True, report_communication_metrics=True
),
],
)
@pytest.mark.parametrize(
"expected_type",
[ScalarQuantizationChannel],
)
def test_int8_stats(self, config: Type, expected_type: Type) -> None:
"""
Tests stats measurement. We manually compute bytes sent from client
to server for the two weight matrices (remember that we do not quantize
the biases), and check that it's a quarter of the bytes sent from
server to client (subtracting scales and zero_points).
"""
# instantiation
channel = instantiate(config)
# create dummy model
two_fc = utils.TwoFC()
download_model = utils.SampleNet(FLModelParamUtils.clone(two_fc))
upload_model = utils.SampleNet(FLModelParamUtils.clone(two_fc))
# server -> client
message = Message(download_model)
message = channel.server_to_client(message)
# client -> server
message = Message(upload_model)
message = channel.client_to_server(message)
# test communication stats measurements (here per_tensor int8 quantization)
stats = channel.stats_collector.get_channel_stats()
client_to_server_bytes = stats[ChannelDirection.CLIENT_TO_SERVER].mean()
server_to_client_bytes = stats[ChannelDirection.SERVER_TO_CLIENT].mean()
channels_first_layer = 5
channels_second_layer = 1
n_tot_channels = channels_first_layer + channels_second_layer
bias_size_bytes = n_tot_channels * channel.BYTES_PER_FP32
n_layers = 2
scale_zero_point_bytes = n_layers * (
channel.BYTES_PER_FP64 + channel.BYTES_PER_FP32
)
int8_weight_bytes = (
client_to_server_bytes - bias_size_bytes - scale_zero_point_bytes
)
float_weights_bytes = server_to_client_bytes - bias_size_bytes
assertEqual(4 * int8_weight_bytes, float_weights_bytes)
@pytest.mark.parametrize(
"config",
[
ScalarQuantizationChannelConfig(n_bits=4, quantize_per_tensor=False),
],
)
@pytest.mark.parametrize(
"expected_type",
[ScalarQuantizationChannel],
)
def test_quantize_constant(self, config: Type, expected_type: Type) -> None:
"""
Tests client to server transmission of the message. Models
before and after transmission should be identical since we
quantize a model filled with weights and biases equal to 1.
"""
# instantiation
channel = instantiate(config)
# create dummy model with constant weights
two_fc = utils.TwoFC()
two_fc.fill_all(1.0)
base_model = utils.SampleNet(two_fc)
upload_model = FLModelParamUtils.clone(base_model)
# test client -> server, all weights are equal so quantization error is
# zero for every acceptable value of n_bits (here tested with n_bits = 4)
message = Message(upload_model)
message = channel.client_to_server(message)
mismatched = FLModelParamUtils.get_mismatched_param(
[base_model.fl_get_module(), upload_model.fl_get_module()]
)
assertEqual(mismatched, "", mismatched)
| canife-main | FLSim/flsim/channels/tests/test_scalar_quantization_channel.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/channels/tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Type
import pytest
from flsim.channels.communication_stats import ChannelDirection
from flsim.channels.half_precision_channel import (
HalfPrecisionChannel,
HalfPrecisionChannelConfig,
)
from flsim.channels.message import Message
from flsim.common.pytest_helper import assertEqual, assertIsInstance
from flsim.utils import test_utils as utils
from flsim.utils.fl.common import FLModelParamUtils
from hydra.utils import instantiate
class TestHalfPrecisionChannel:
@pytest.mark.parametrize(
"config",
[HalfPrecisionChannelConfig()],
)
@pytest.mark.parametrize(
"expected_type",
[HalfPrecisionChannel],
)
def test_fp16_instantiation(self, config: Type, expected_type: Type) -> None:
"""
Tests instantiation of the `fp16` channel.
"""
# instantiation
channel = instantiate(config)
assertIsInstance(channel, expected_type)
@pytest.mark.parametrize(
"config",
[HalfPrecisionChannelConfig()],
)
@pytest.mark.parametrize(
"expected_type",
[HalfPrecisionChannel],
)
def test_fp16_server_to_client(self, config: Type, expected_type: Type) -> None:
"""
Tests server to client transmission of the message. Models
before and after transmission should be identical since
we emulate `fp16` only from client to server.
"""
# test instantiation
channel = instantiate(config)
# create dummy model
two_fc = utils.TwoFC()
base_model = utils.SampleNet(two_fc)
download_model = FLModelParamUtils.clone(base_model)
# test server -> client, models should be strictly identical
message = Message(download_model)
message = channel.server_to_client(message)
mismatched = FLModelParamUtils.get_mismatched_param(
[base_model.fl_get_module(), download_model.fl_get_module()]
)
assertEqual(mismatched, "", mismatched)
@pytest.mark.parametrize(
"config",
[HalfPrecisionChannelConfig()],
)
@pytest.mark.parametrize(
"expected_type",
[HalfPrecisionChannel],
)
def test_fp16_client_to_server(self, config: Type, expected_type: Type) -> None:
"""
Tests client to server transmission of the message. Models
before and after transmission should be almost identical
due to fp16 emulation.
"""
# instantiation
channel = instantiate(config)
# create dummy model
two_fc = utils.TwoFC()
base_model = utils.SampleNet(two_fc)
upload_model = FLModelParamUtils.clone(base_model)
# test client -> server, models should be almost equal due to fp16 emulation
message = Message(upload_model)
message = channel.client_to_server(message)
mismatched = FLModelParamUtils.get_mismatched_param(
[base_model.fl_get_module(), upload_model.fl_get_module()],
rel_epsilon=1e-8,
abs_epsilon=1e-3,
)
assertEqual(mismatched, "", mismatched)
@pytest.mark.parametrize(
"config",
[HalfPrecisionChannelConfig(report_communication_metrics=True)],
)
@pytest.mark.parametrize(
"expected_type",
[HalfPrecisionChannel],
)
def test_fp16_stats(self, config: Type, expected_type: Type) -> None:
"""
Tests stats measurement. Bytes sent from server to client should be
twice as big as bytes sent from client to server (`fp16` emulation).
"""
# instantiation
channel = instantiate(config)
assertIsInstance(channel, expected_type)
# create dummy model
two_fc = utils.TwoFC()
base_model = utils.SampleNet(two_fc)
download_model = FLModelParamUtils.clone(base_model)
upload_model = FLModelParamUtils.clone(base_model)
# server -> client
message = Message(download_model)
message = channel.server_to_client(message)
# client -> server
message = Message(upload_model)
message = channel.client_to_server(message)
# test communication stats measurements
stats = channel.stats_collector.get_channel_stats()
client_to_server_bytes = stats[ChannelDirection.CLIENT_TO_SERVER].mean()
server_to_client_bytes = stats[ChannelDirection.SERVER_TO_CLIENT].mean()
assertEqual(2 * client_to_server_bytes, server_to_client_bytes)
| canife-main | FLSim/flsim/channels/tests/test_half_precision_channel.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import OrderedDict, Type
import pytest
import torch
from flsim.channels.base_channel import Message
from flsim.channels.communication_stats import ChannelDirection
from flsim.channels.sparse_mask_channel import (
SparseMaskChannel,
SparseMaskChannelConfig,
)
from flsim.common.pytest_helper import assertAlmostEqual, assertEqual, assertIsInstance
from flsim.utils import test_utils as utils
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.tests.helpers.test_models import FCModel
from hydra.utils import instantiate
from torch.nn.utils import prune
class TestSparseMaskChannel:
@classmethod
def calc_model_sparsity(cls, state_dict: OrderedDict) -> float:
"""
Calculates model sparsity (fraction of zeroed weights in state_dict).
"""
non_zero = 0
tot = 1e-6
for _, param in state_dict.items():
non_zero += torch.count_nonzero(param).item()
tot += float(param.numel())
return 1.0 - non_zero / (tot + 1e-6)
def test_sparse_model_size(self) -> None:
model = FCModel()
# Prune model to a quarter of its size
params_to_prune = [
(model.fc1, "weight"),
(model.fc1, "bias"),
(model.fc2, "weight"),
(model.fc2, "bias"),
(model.fc3, "weight"),
(model.fc3, "bias"),
]
prune.global_unstructured(
params_to_prune,
pruning_method=prune.L1Unstructured,
amount=0.75,
)
for module, name in params_to_prune:
prune.remove(module, name)
# pyre-fixme[6]: Expected `OrderedDict[typing.Any, typing.Any]` for 1st
# param but got `Dict[str, typing.Any]`.
sparsity = self.calc_model_sparsity(model.state_dict())
assertAlmostEqual(
0.75,
sparsity,
delta=0.02, # Accounts for 2 percentage points difference
)
@pytest.mark.parametrize(
"config",
[
SparseMaskChannelConfig(
proportion_of_zero_weights=0.6,
sparsity_method="topk",
),
SparseMaskChannelConfig(
proportion_of_zero_weights=0.6,
sparsity_method="random",
),
],
)
@pytest.mark.parametrize(
"expected_type",
[SparseMaskChannel],
)
def test_random_mask_instantiation(self, config: Type, expected_type: Type) -> None:
"""
Tests instantiation of the random mask channel.
"""
# test instantiation
channel = instantiate(config)
assertIsInstance(channel, expected_type)
@pytest.mark.parametrize(
"config",
[
SparseMaskChannelConfig(
proportion_of_zero_weights=0.6,
sparsity_method="topk",
),
SparseMaskChannelConfig(
proportion_of_zero_weights=0.6,
sparsity_method="random",
),
],
)
@pytest.mark.parametrize(
"expected_type",
[SparseMaskChannel],
)
def test_random_mask_server_to_client(
self, config: Type, expected_type: Type
) -> None:
"""
Tests server to client transmission of the message. Models
before and after transmission should be identical since
we apply the sparse mask only in the client to server direction.
"""
# instantiation
channel = instantiate(config)
# create dummy model
two_fc = utils.TwoFC()
base_model = utils.SampleNet(two_fc)
download_model = FLModelParamUtils.clone(base_model)
# test server -> client, models should be strictly identical
message = Message(download_model)
message = channel.server_to_client(message)
mismatched = FLModelParamUtils.get_mismatched_param(
[base_model.fl_get_module(), download_model.fl_get_module()]
)
assertEqual(mismatched, "", mismatched)
@pytest.mark.parametrize(
"config",
[
SparseMaskChannelConfig(
proportion_of_zero_weights=0.6,
sparsity_method="topk",
),
SparseMaskChannelConfig(
proportion_of_zero_weights=0.6,
sparsity_method="random",
),
],
)
@pytest.mark.parametrize(
"expected_type",
[SparseMaskChannel],
)
def test_sparse_mask_client_to_server(
self, config: Type, expected_type: Type
) -> None:
"""
Tests client to server transmission of the message. Model
after transmission should have the right sparsity ratio.
"""
# instantiation
channel = instantiate(config)
# create dummy model
two_fc = utils.TwoFC()
upload_model = utils.SampleNet(FLModelParamUtils.clone(two_fc))
# test client -> server, check for sparsity ratio
message = Message(upload_model)
message = channel.client_to_server(message)
sparsity = self.calc_model_sparsity(message.model_state_dict)
# sparsity ratio should be approximately proportion_of_zero_weights
# approximately since we round the number of parameters to prune
# to an integer, see sparse_mask_channel.py
assertAlmostEqual(channel.cfg.proportion_of_zero_weights, sparsity, delta=0.05)
@pytest.mark.parametrize(
"config",
[
SparseMaskChannelConfig(
proportion_of_zero_weights=0.6,
sparsity_method="topk",
),
],
)
@pytest.mark.parametrize(
"expected_type",
[SparseMaskChannel],
)
def test_topk_mask_sparsity(self, config: Type, expected_type: Type) -> None:
"""
Tests that top-k compression has worked, i.e. the smallest client updates
are masked out.
"""
# instantiation
channel = instantiate(config)
# create dummy model
two_fc = utils.TwoFC()
base_model = utils.SampleNet(two_fc)
upload_model = FLModelParamUtils.clone(base_model)
# test client -> server, check for topk sparse mask
message = Message(upload_model)
message = channel.client_to_server(message)
for name, p in base_model.fl_get_module().named_parameters():
flattened_params = p.flatten().abs()
sparse_indices = flattened_params.abs().argsort()[
: int(config.proportion_of_zero_weights * flattened_params.numel())
]
flattened_message_params = torch.cat(
[torch.flatten(p) for p in message.model_state_dict[name]]
).flatten()
assertEqual(flattened_message_params[sparse_indices].sum(), 0.0)
@pytest.mark.parametrize("sparsity_method", ["topk", "random"])
@pytest.mark.parametrize(
"compressed_size_measurement",
["bitmask", "coo"],
)
@pytest.mark.parametrize(
"expected_type",
[SparseMaskChannel],
)
def test_sparse_mask_stats(
self,
sparsity_method: str,
compressed_size_measurement: str,
expected_type: Type,
) -> None:
"""
Tests stats measurement. We manually compute the number of bytes
sent from client to server, under both the COO and the bitmask size
measurements, and check that it matches what the channel reports.
"""
# instantiation
config = SparseMaskChannelConfig(
proportion_of_zero_weights=0.6,
report_communication_metrics=True,
sparsity_method=sparsity_method,
compressed_size_measurement=compressed_size_measurement,
)
channel = instantiate(config)
# create dummy model
two_fc = utils.TwoFC()
upload_model = utils.SampleNet(FLModelParamUtils.clone(two_fc))
# client -> server
message = Message(upload_model)
message = channel.client_to_server(message)
# test communication stats measurements
stats = channel.stats_collector.get_channel_stats()
client_to_server_bytes = stats[ChannelDirection.CLIENT_TO_SERVER].mean()
# compute sizes
n_weights = sum([p.numel() for p in two_fc.parameters() if p.ndim == 2])
n_biases = sum([p.numel() for p in two_fc.parameters() if p.ndim == 1])
non_zero_weights = n_weights - int(
n_weights * channel.cfg.proportion_of_zero_weights
)
non_zero_biases = n_biases - int(
n_biases * channel.cfg.proportion_of_zero_weights
)
n_dim_weights = 2
n_dim_biases = 1
true_size_bytes_weights = (
# size of the index
(
non_zero_weights * SparseMaskChannel.BYTES_PER_INT64 * n_dim_weights
if compressed_size_measurement == "coo"
else SparseMaskChannel.BYTES_PER_BIT * n_weights
)
# size of values
+ non_zero_weights * SparseMaskChannel.BYTES_PER_FP32
)
true_size_bytes_biases = (
# size of the index
(
non_zero_biases * SparseMaskChannel.BYTES_PER_INT64 * n_dim_biases
if compressed_size_measurement == "coo"
else SparseMaskChannel.BYTES_PER_BIT * n_biases
)
# size of values
+ non_zero_biases * SparseMaskChannel.BYTES_PER_FP32
)
# total compressed size in bytes
true_size_bytes = true_size_bytes_weights + true_size_bytes_biases
assertEqual(client_to_server_bytes, true_size_bytes)
@pytest.mark.parametrize("sparsity_method", ["topk", "random"])
def test_sparsity_after_reception(self, sparsity_method: str) -> None:
"""
Tests if the message received at the server after transmission has
the expected sparsity
"""
# instantiation
config = SparseMaskChannelConfig(
proportion_of_zero_weights=0.6,
report_communication_metrics=True,
sparsity_method=sparsity_method,
)
channel = instantiate(config)
# create dummy model
two_fc = utils.TwoFC()
upload_model = utils.SampleNet(FLModelParamUtils.clone(two_fc))
# client -> server
message = Message(upload_model)
message = channel.client_to_server(message)
# Test that message model has sparsity approximately 0.6
state_dict = message.model.fl_get_module().state_dict()
assertAlmostEqual(self.calc_model_sparsity(state_dict), 0.6, delta=0.05)
| canife-main | FLSim/flsim/channels/tests/test_sparse_mask_channel.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import find_packages, setup
setup(
name="cotracker",
version="1.0",
install_requires=[],
packages=find_packages(exclude="notebooks"),
extras_require={
"all": ["matplotlib", "opencv-python"],
"dev": ["flake8", "black"],
},
)
| co-tracker-main | setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import torch
import signal
import socket
import sys
import json
import numpy as np
import argparse
import logging
from pathlib import Path
from tqdm import tqdm
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.cuda.amp import GradScaler
from torch.utils.tensorboard import SummaryWriter
from pytorch_lightning.lite import LightningLite
from cotracker.models.evaluation_predictor import EvaluationPredictor
from cotracker.models.core.cotracker.cotracker import CoTracker
from cotracker.utils.visualizer import Visualizer
from cotracker.datasets.tap_vid_datasets import TapVidDataset
from cotracker.datasets.badja_dataset import BadjaDataset
from cotracker.datasets.fast_capture_dataset import FastCaptureDataset
from cotracker.evaluation.core.evaluator import Evaluator
from cotracker.datasets import kubric_movif_dataset
from cotracker.datasets.utils import collate_fn, collate_fn_train, dataclass_to_cuda_
from cotracker.models.core.cotracker.losses import sequence_loss, balanced_ce_loss
def fetch_optimizer(args, model):
"""Create the optimizer and learning rate scheduler"""
optimizer = optim.AdamW(
model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=1e-8
)
scheduler = optim.lr_scheduler.OneCycleLR(
optimizer,
args.lr,
args.num_steps + 100,
pct_start=0.05,
cycle_momentum=False,
anneal_strategy="linear",
)
return optimizer, scheduler
def forward_batch(batch, model, args, loss_fn=None, writer=None, step=0):
rgbs = batch.video
trajs_g = batch.trajectory
vis_g = batch.visibility
valids = batch.valid
B, T, C, H, W = rgbs.shape
assert C == 3
B, T, N, D = trajs_g.shape
device = rgbs.device
__, first_positive_inds = torch.max(vis_g, dim=1)
# We want to make sure that during training the model sees visible points
# that it does not need to track just yet: they are visible but queried from a later frame
N_rand = N // 4
# inds of visible points in the 1st frame
nonzero_inds = [torch.nonzero(vis_g[0, :, i]) for i in range(N)]
rand_vis_inds = torch.cat(
[
nonzero_row[torch.randint(len(nonzero_row), size=(1,))]
for nonzero_row in nonzero_inds
],
dim=1,
)
first_positive_inds = torch.cat(
[rand_vis_inds[:, :N_rand], first_positive_inds[:, N_rand:]], dim=1
)
ind_array_ = torch.arange(T, device=device)
ind_array_ = ind_array_[None, :, None].repeat(B, 1, N)
assert torch.allclose(
vis_g[ind_array_ == first_positive_inds[:, None, :]],
torch.ones_like(vis_g),
)
assert torch.allclose(
vis_g[ind_array_ == rand_vis_inds[:, None, :]], torch.ones_like(vis_g)
)
gather = torch.gather(
trajs_g, 1, first_positive_inds[:, :, None, None].repeat(1, 1, N, 2)
)
xys = torch.diagonal(gather, dim1=1, dim2=2).permute(0, 2, 1)
queries = torch.cat([first_positive_inds[:, :, None], xys], dim=2)
predictions, __, visibility, train_data = model(
rgbs=rgbs, queries=queries, iters=args.train_iters, is_train=True
)
vis_predictions, coord_predictions, wind_inds, sort_inds = train_data
trajs_g = trajs_g[:, :, sort_inds]
vis_g = vis_g[:, :, sort_inds]
valids = valids[:, :, sort_inds]
vis_gts = []
traj_gts = []
valids_gts = []
for i, wind_idx in enumerate(wind_inds):
ind = i * (args.sliding_window_len // 2)
vis_gts.append(vis_g[:, ind : ind + args.sliding_window_len, :wind_idx])
traj_gts.append(trajs_g[:, ind : ind + args.sliding_window_len, :wind_idx])
valids_gts.append(valids[:, ind : ind + args.sliding_window_len, :wind_idx])
seq_loss = sequence_loss(coord_predictions, traj_gts, vis_gts, valids_gts, 0.8)
vis_loss = balanced_ce_loss(vis_predictions, vis_gts, valids_gts)
output = {"flow": {"predictions": predictions[0].detach()}}
output["flow"]["loss"] = seq_loss.mean()
output["visibility"] = {
"loss": vis_loss.mean() * 10.0,
"predictions": visibility[0].detach(),
}
return output
def run_test_eval(evaluator, model, dataloaders, writer, step):
model.eval()
for ds_name, dataloader in dataloaders:
predictor = EvaluationPredictor(
model.module.module,
grid_size=6,
local_grid_size=0,
single_point=False,
n_iters=6,
)
if torch.cuda.is_available():
predictor.model = predictor.model.cuda()
metrics = evaluator.evaluate_sequence(
model=predictor,
test_dataloader=dataloader,
dataset_name=ds_name,
train_mode=True,
writer=writer,
step=step,
)
if ds_name == "badja" or ds_name == "fastcapture" or ("kubric" in ds_name):
metrics = {
**{
f"{ds_name}_avg": np.mean(
[v for k, v in metrics.items() if "accuracy" not in k]
)
},
**{
f"{ds_name}_avg_accuracy": np.mean(
[v for k, v in metrics.items() if "accuracy" in k]
)
},
}
print("avg", np.mean([v for v in metrics.values()]))
if "tapvid" in ds_name:
metrics = {
f"{ds_name}_avg_OA": metrics["avg"]["occlusion_accuracy"] * 100,
f"{ds_name}_avg_delta": metrics["avg"]["average_pts_within_thresh"]
* 100,
f"{ds_name}_avg_Jaccard": metrics["avg"]["average_jaccard"] * 100,
}
writer.add_scalars(f"Eval", metrics, step)
class Logger:
SUM_FREQ = 100
def __init__(self, model, scheduler):
self.model = model
self.scheduler = scheduler
self.total_steps = 0
self.running_loss = {}
self.writer = SummaryWriter(log_dir=os.path.join(args.ckpt_path, "runs"))
def _print_training_status(self):
metrics_data = [
self.running_loss[k] / Logger.SUM_FREQ
for k in sorted(self.running_loss.keys())
]
training_str = "[{:6d}] ".format(self.total_steps + 1)
metrics_str = ("{:10.4f}, " * len(metrics_data)).format(*metrics_data)
# print the training status
logging.info(
f"Training Metrics ({self.total_steps}): {training_str + metrics_str}"
)
if self.writer is None:
self.writer = SummaryWriter(log_dir=os.path.join(args.ckpt_path, "runs"))
for k in self.running_loss:
self.writer.add_scalar(
k, self.running_loss[k] / Logger.SUM_FREQ, self.total_steps
)
self.running_loss[k] = 0.0
def push(self, metrics, task):
self.total_steps += 1
for key in metrics:
task_key = str(key) + "_" + task
if task_key not in self.running_loss:
self.running_loss[task_key] = 0.0
self.running_loss[task_key] += metrics[key]
if self.total_steps % Logger.SUM_FREQ == Logger.SUM_FREQ - 1:
self._print_training_status()
self.running_loss = {}
def write_dict(self, results):
if self.writer is None:
self.writer = SummaryWriter(log_dir=os.path.join(args.ckpt_path, "runs"))
for key in results:
self.writer.add_scalar(key, results[key], self.total_steps)
def close(self):
self.writer.close()
class Lite(LightningLite):
def run(self, args):
def seed_everything(seed: int):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
seed_everything(0)
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
g = torch.Generator()
g.manual_seed(0)
eval_dataloaders = []
if "badja" in args.eval_datasets:
eval_dataset = BadjaDataset(
data_root=os.path.join(args.dataset_root, "BADJA"),
max_seq_len=args.eval_max_seq_len,
dataset_resolution=args.crop_size,
)
eval_dataloader_badja = torch.utils.data.DataLoader(
eval_dataset,
batch_size=1,
shuffle=False,
num_workers=8,
collate_fn=collate_fn,
)
eval_dataloaders.append(("badja", eval_dataloader_badja))
if "fastcapture" in args.eval_datasets:
eval_dataset = FastCaptureDataset(
data_root=os.path.join(args.dataset_root, "fastcapture"),
max_seq_len=min(100, args.eval_max_seq_len),
max_num_points=40,
dataset_resolution=args.crop_size,
)
eval_dataloader_fastcapture = torch.utils.data.DataLoader(
eval_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
collate_fn=collate_fn,
)
eval_dataloaders.append(("fastcapture", eval_dataloader_fastcapture))
if "tapvid_davis_first" in args.eval_datasets:
data_root = os.path.join(args.dataset_root, "tapvid_davis/tapvid_davis.pkl")
eval_dataset = TapVidDataset(dataset_type="davis", data_root=data_root)
eval_dataloader_tapvid_davis = torch.utils.data.DataLoader(
eval_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
collate_fn=collate_fn,
)
eval_dataloaders.append(("tapvid_davis", eval_dataloader_tapvid_davis))
evaluator = Evaluator(args.ckpt_path)
visualizer = Visualizer(
save_dir=args.ckpt_path,
pad_value=80,
fps=1,
show_first_frame=0,
tracks_leave_trace=0,
)
loss_fn = None
if args.model_name == "cotracker":
model = CoTracker(
stride=args.model_stride,
S=args.sliding_window_len,
add_space_attn=not args.remove_space_attn,
num_heads=args.updateformer_num_heads,
hidden_size=args.updateformer_hidden_size,
space_depth=args.updateformer_space_depth,
time_depth=args.updateformer_time_depth,
)
else:
raise ValueError(f"Model {args.model_name} doesn't exist")
with open(args.ckpt_path + "/meta.json", "w") as file:
json.dump(vars(args), file, sort_keys=True, indent=4)
model.cuda()
train_dataset = kubric_movif_dataset.KubricMovifDataset(
data_root=os.path.join(args.dataset_root, "kubric_movi_f"),
crop_size=args.crop_size,
seq_len=args.sequence_len,
traj_per_sample=args.traj_per_sample,
sample_vis_1st_frame=args.sample_vis_1st_frame,
use_augs=not args.dont_use_augs,
)
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
worker_init_fn=seed_worker,
generator=g,
pin_memory=True,
collate_fn=collate_fn_train,
drop_last=True,
)
train_loader = self.setup_dataloaders(train_loader, move_to_device=False)
print("LEN TRAIN LOADER", len(train_loader))
optimizer, scheduler = fetch_optimizer(args, model)
total_steps = 0
logger = Logger(model, scheduler)
folder_ckpts = [
f
for f in os.listdir(args.ckpt_path)
if not os.path.isdir(f) and f.endswith(".pth") and not "final" in f
]
if len(folder_ckpts) > 0:
ckpt_path = sorted(folder_ckpts)[-1]
ckpt = self.load(os.path.join(args.ckpt_path, ckpt_path))
logging.info(f"Loading checkpoint {ckpt_path}")
if "model" in ckpt:
model.load_state_dict(ckpt["model"])
else:
model.load_state_dict(ckpt)
if "optimizer" in ckpt:
logging.info("Load optimizer")
optimizer.load_state_dict(ckpt["optimizer"])
if "scheduler" in ckpt:
logging.info("Load scheduler")
scheduler.load_state_dict(ckpt["scheduler"])
if "total_steps" in ckpt:
total_steps = ckpt["total_steps"]
logging.info(f"Load total_steps {total_steps}")
elif args.restore_ckpt is not None:
assert args.restore_ckpt.endswith(".pth") or args.restore_ckpt.endswith(
".pt"
)
logging.info("Loading checkpoint...")
strict = True
state_dict = self.load(args.restore_ckpt)
if "model" in state_dict:
state_dict = state_dict["model"]
if list(state_dict.keys())[0].startswith("module."):
state_dict = {
k.replace("module.", ""): v for k, v in state_dict.items()
}
model.load_state_dict(state_dict, strict=strict)
logging.info(f"Done loading checkpoint")
model, optimizer = self.setup(model, optimizer, move_to_device=False)
# model.cuda()
model.train()
save_freq = args.save_freq
scaler = GradScaler(enabled=args.mixed_precision)
should_keep_training = True
global_batch_num = 0
epoch = -1
while should_keep_training:
epoch += 1
for i_batch, batch in enumerate(tqdm(train_loader)):
batch, gotit = batch
if not all(gotit):
print("batch is None")
continue
dataclass_to_cuda_(batch)
optimizer.zero_grad()
assert model.training
output = forward_batch(
batch,
model,
args,
loss_fn=loss_fn,
writer=logger.writer,
step=total_steps,
)
loss = 0
for k, v in output.items():
if "loss" in v:
loss += v["loss"]
logger.writer.add_scalar(
f"live_{k}_loss", v["loss"].item(), total_steps
)
if "metrics" in v:
logger.push(v["metrics"], k)
if self.global_rank == 0:
if total_steps % save_freq == save_freq - 1:
if args.model_name == "motion_diffuser":
pred_coords = model.module.module.forward_batch_test(
batch, interp_shape=args.crop_size
)
output["flow"] = {"predictions": pred_coords[0].detach()}
visualizer.visualize(
video=batch.video.clone(),
tracks=batch.trajectory.clone(),
filename="train_gt_traj",
writer=logger.writer,
step=total_steps,
)
visualizer.visualize(
video=batch.video.clone(),
tracks=output["flow"]["predictions"][None],
filename="train_pred_traj",
writer=logger.writer,
step=total_steps,
)
if len(output) > 1:
logger.writer.add_scalar(
f"live_total_loss", loss.item(), total_steps
)
logger.writer.add_scalar(
f"learning_rate", optimizer.param_groups[0]["lr"], total_steps
)
global_batch_num += 1
self.barrier()
self.backward(scaler.scale(loss))
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 10.0)
scaler.step(optimizer)
scheduler.step()
scaler.update()
total_steps += 1
if self.global_rank == 0:
if (i_batch >= len(train_loader) - 1) or (
total_steps == 1 and args.validate_at_start
):
if (epoch + 1) % args.save_every_n_epoch == 0:
ckpt_iter = "0" * (6 - len(str(total_steps))) + str(
total_steps
)
save_path = Path(
f"{args.ckpt_path}/model_{args.model_name}_{ckpt_iter}.pth"
)
save_dict = {
"model": model.module.module.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"total_steps": total_steps,
}
logging.info(f"Saving file {save_path}")
self.save(save_dict, save_path)
if (epoch + 1) % args.evaluate_every_n_epoch == 0 or (
args.validate_at_start and epoch == 0
):
run_test_eval(
evaluator,
model,
eval_dataloaders,
logger.writer,
total_steps,
)
model.train()
torch.cuda.empty_cache()
self.barrier()
if total_steps > args.num_steps:
should_keep_training = False
break
print("FINISHED TRAINING")
PATH = f"{args.ckpt_path}/{args.model_name}_final.pth"
torch.save(model.module.module.state_dict(), PATH)
run_test_eval(evaluator, model, eval_dataloaders, logger.writer, total_steps)
logger.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", default="cotracker", help="model name")
parser.add_argument("--restore_ckpt", help="path to restore a checkpoint")
parser.add_argument("--ckpt_path", help="path to save checkpoints")
parser.add_argument(
"--batch_size", type=int, default=4, help="batch size used during training."
)
parser.add_argument(
"--num_workers", type=int, default=6, help="number of dataloader workers"
)
parser.add_argument(
"--mixed_precision", action="store_true", help="use mixed precision"
)
parser.add_argument("--lr", type=float, default=0.0005, help="max learning rate.")
parser.add_argument(
"--wdecay", type=float, default=0.00001, help="Weight decay in optimizer."
)
parser.add_argument(
"--num_steps", type=int, default=200000, help="length of training schedule."
)
parser.add_argument(
"--evaluate_every_n_epoch",
type=int,
default=1,
help="evaluate during training after every n epochs, after every epoch by default",
)
parser.add_argument(
"--save_every_n_epoch",
type=int,
default=1,
help="save checkpoints during training after every n epochs, after every epoch by default",
)
parser.add_argument(
"--validate_at_start",
action="store_true",
help="whether to run evaluation before training starts",
)
parser.add_argument(
"--save_freq",
type=int,
default=100,
help="frequency of trajectory visualization during training",
)
parser.add_argument(
"--traj_per_sample",
type=int,
default=768,
help="the number of trajectories to sample for training",
)
parser.add_argument(
"--dataset_root", type=str, help="path lo all the datasets (train and eval)"
)
parser.add_argument(
"--train_iters",
type=int,
default=4,
help="number of updates to the disparity field in each forward pass.",
)
parser.add_argument(
"--sequence_len", type=int, default=8, help="train sequence length"
)
parser.add_argument(
"--eval_datasets",
nargs="+",
default=["things", "badja"],
help="what datasets to use for evaluation",
)
parser.add_argument(
"--remove_space_attn",
action="store_true",
help="remove space attention from CoTracker",
)
parser.add_argument(
"--dont_use_augs",
action="store_true",
help="don't apply augmentations during training",
)
parser.add_argument(
"--sample_vis_1st_frame",
action="store_true",
help="only sample trajectories with points visible on the first frame",
)
parser.add_argument(
"--sliding_window_len",
type=int,
default=8,
help="length of the CoTracker sliding window",
)
parser.add_argument(
"--updateformer_hidden_size",
type=int,
default=384,
help="hidden dimension of the CoTracker transformer model",
)
parser.add_argument(
"--updateformer_num_heads",
type=int,
default=8,
help="number of heads of the CoTracker transformer model",
)
parser.add_argument(
"--updateformer_space_depth",
type=int,
default=12,
help="number of group attention layers in the CoTracker transformer model",
)
parser.add_argument(
"--updateformer_time_depth",
type=int,
default=12,
help="number of time attention layers in the CoTracker transformer model",
)
parser.add_argument(
"--model_stride",
type=int,
default=8,
help="stride of the CoTracker feature network",
)
parser.add_argument(
"--crop_size",
type=int,
nargs="+",
default=[384, 512],
help="crop videos to this resolution during training",
)
parser.add_argument(
"--eval_max_seq_len",
type=int,
default=1000,
help="maximum length of evaluation videos",
)
args = parser.parse_args()
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
)
Path(args.ckpt_path).mkdir(exist_ok=True, parents=True)
from pytorch_lightning.strategies import DDPStrategy
Lite(
strategy=DDPStrategy(find_unused_parameters=True),
devices="auto",
accelerator="gpu",
precision=32,
# num_nodes=4,
).run(args)
| co-tracker-main | train.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
dependencies = ["torch", "einops", "timm", "tqdm"]
_COTRACKER_URL = (
"https://dl.fbaipublicfiles.com/cotracker/cotracker_stride_4_wind_8.pth"
)
def _make_cotracker_predictor(*, pretrained: bool = True, **kwargs):
from cotracker.predictor import CoTrackerPredictor
predictor = CoTrackerPredictor(checkpoint=None)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
_COTRACKER_URL, map_location="cpu"
)
predictor.model.load_state_dict(state_dict)
return predictor
def cotracker_w8(*, pretrained: bool = True, **kwargs):
"""
CoTracker model with stride 4 and window length 8. (The main model from the paper)
"""
return _make_cotracker_predictor(pretrained=pretrained, **kwargs)
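# --- Hedged usage sketch (added; not part of the original file) ---
# One possible way to call this hub entry point. The GitHub path below is an
# assumption based on the repository name, and the dummy video shape is made up.
#
#   predictor = torch.hub.load("facebookresearch/co-tracker", "cotracker_w8")
#   video = torch.randn(1, 8, 3, 384, 512)  # (B, T, C, H, W)
#   pred_tracks, pred_visibility = predictor(video, grid_size=10)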
| co-tracker-main | hubconf.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import cv2
import torch
import argparse
import numpy as np
from PIL import Image
from cotracker.utils.visualizer import Visualizer, read_video_from_path
from cotracker.predictor import CoTrackerPredictor
DEFAULT_DEVICE = ('cuda' if torch.cuda.is_available() else
'mps' if torch.backends.mps.is_available() else
'cpu')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--video_path",
default="./assets/apple.mp4",
help="path to a video",
)
parser.add_argument(
"--mask_path",
default="./assets/apple_mask.png",
help="path to a segmentation mask",
)
parser.add_argument(
"--checkpoint",
default="./checkpoints/cotracker_stride_4_wind_8.pth",
help="cotracker model",
)
parser.add_argument("--grid_size", type=int, default=0, help="Regular grid size")
parser.add_argument(
"--grid_query_frame",
type=int,
default=0,
help="Compute dense and grid tracks starting from this frame ",
)
parser.add_argument(
"--backward_tracking",
action="store_true",
help="Compute tracks in both directions, not only forward",
)
args = parser.parse_args()
# load the input video frame by frame
video = read_video_from_path(args.video_path)
video = torch.from_numpy(video).permute(0, 3, 1, 2)[None].float()
segm_mask = np.array(Image.open(os.path.join(args.mask_path)))
segm_mask = torch.from_numpy(segm_mask)[None, None]
model = CoTrackerPredictor(checkpoint=args.checkpoint)
model = model.to(DEFAULT_DEVICE)
video = video.to(DEFAULT_DEVICE)
pred_tracks, pred_visibility = model(
video,
grid_size=args.grid_size,
grid_query_frame=args.grid_query_frame,
backward_tracking=args.backward_tracking,
# segm_mask=segm_mask
)
print("computed")
# save a video with predicted tracks
seq_name = args.video_path.split("/")[-1]
vis = Visualizer(save_dir="./saved_videos", pad_value=120, linewidth=3)
vis.visualize(video, pred_tracks, pred_visibility, query_frame=args.grid_query_frame)
| co-tracker-main | demo.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from tqdm import tqdm
from cotracker.models.core.cotracker.cotracker import get_points_on_a_grid
from cotracker.models.core.model_utils import smart_cat
from cotracker.models.build_cotracker import (
build_cotracker,
)
class CoTrackerPredictor(torch.nn.Module):
def __init__(
self, checkpoint="cotracker/checkpoints/cotracker_stride_4_wind_8.pth"
):
super().__init__()
self.interp_shape = (384, 512)
self.support_grid_size = 6
model = build_cotracker(checkpoint)
self.model = model
self.model.eval()
@torch.no_grad()
def forward(
self,
video, # (1, T, 3, H, W)
# input prompt types:
# - None. Dense tracks are computed in this case. You can adjust *query_frame* to compute tracks starting from a specific frame.
# *backward_tracking=True* will compute tracks in both directions.
# - queries. Queried points of shape (1, N, 3) in format (t, x, y) for frame index and pixel coordinates.
# - grid_size. Grid of N*N points from the first frame. if segm_mask is provided, then computed only for the mask.
# You can adjust *query_frame* and *backward_tracking* for the regular grid in the same way as for dense tracks.
queries: torch.Tensor = None,
segm_mask: torch.Tensor = None, # Segmentation mask of shape (B, 1, H, W)
grid_size: int = 0,
grid_query_frame: int = 0, # only for dense and regular grid tracks
backward_tracking: bool = False,
):
if queries is None and grid_size == 0:
tracks, visibilities = self._compute_dense_tracks(
video,
grid_query_frame=grid_query_frame,
backward_tracking=backward_tracking,
)
else:
tracks, visibilities = self._compute_sparse_tracks(
video,
queries,
segm_mask,
grid_size,
add_support_grid=(grid_size == 0 or segm_mask is not None),
grid_query_frame=grid_query_frame,
backward_tracking=backward_tracking,
)
return tracks, visibilities
def _compute_dense_tracks(
self, video, grid_query_frame, grid_size=30, backward_tracking=False
):
*_, H, W = video.shape
grid_step = W // grid_size
grid_width = W // grid_step
grid_height = H // grid_step
tracks = visibilities = None
grid_pts = torch.zeros((1, grid_width * grid_height, 3)).to(video.device)
grid_pts[0, :, 0] = grid_query_frame
for offset in tqdm(range(grid_step * grid_step)):
ox = offset % grid_step
oy = offset // grid_step
grid_pts[0, :, 1] = (
torch.arange(grid_width).repeat(grid_height) * grid_step + ox
)
grid_pts[0, :, 2] = (
torch.arange(grid_height).repeat_interleave(grid_width) * grid_step + oy
)
tracks_step, visibilities_step = self._compute_sparse_tracks(
video=video,
queries=grid_pts,
backward_tracking=backward_tracking,
)
tracks = smart_cat(tracks, tracks_step, dim=2)
visibilities = smart_cat(visibilities, visibilities_step, dim=2)
return tracks, visibilities
def _compute_sparse_tracks(
self,
video,
queries,
segm_mask=None,
grid_size=0,
add_support_grid=False,
grid_query_frame=0,
backward_tracking=False,
):
B, T, C, H, W = video.shape
assert B == 1
video = video.reshape(B * T, C, H, W)
video = F.interpolate(video, tuple(self.interp_shape), mode="bilinear")
video = video.reshape(B, T, 3, self.interp_shape[0], self.interp_shape[1])
if queries is not None:
queries = queries.clone()
B, N, D = queries.shape
assert D == 3
queries[:, :, 1] *= self.interp_shape[1] / W
queries[:, :, 2] *= self.interp_shape[0] / H
elif grid_size > 0:
grid_pts = get_points_on_a_grid(grid_size, self.interp_shape, device=video.device)
if segm_mask is not None:
segm_mask = F.interpolate(
segm_mask, tuple(self.interp_shape), mode="nearest"
)
point_mask = segm_mask[0, 0][
(grid_pts[0, :, 1]).round().long().cpu(),
(grid_pts[0, :, 0]).round().long().cpu(),
].bool()
grid_pts = grid_pts[:, point_mask]
queries = torch.cat(
[torch.ones_like(grid_pts[:, :, :1]) * grid_query_frame, grid_pts],
dim=2,
)
if add_support_grid:
grid_pts = get_points_on_a_grid(self.support_grid_size, self.interp_shape, device=video.device)
grid_pts = torch.cat(
[torch.zeros_like(grid_pts[:, :, :1]), grid_pts], dim=2
)
queries = torch.cat([queries, grid_pts], dim=1)
tracks, __, visibilities, __ = self.model(rgbs=video, queries=queries, iters=6)
if backward_tracking:
tracks, visibilities = self._compute_backward_tracks(
video, queries, tracks, visibilities
)
if add_support_grid:
queries[:, -self.support_grid_size ** 2 :, 0] = T - 1
if add_support_grid:
tracks = tracks[:, :, : -self.support_grid_size ** 2]
visibilities = visibilities[:, :, : -self.support_grid_size ** 2]
thr = 0.9
visibilities = visibilities > thr
# correct query-point predictions
# see https://github.com/facebookresearch/co-tracker/issues/28
# TODO: batchify
for i in range(len(queries)):
queries_t = queries[i, :tracks.size(2), 0].to(torch.int64)
arange = torch.arange(0, len(queries_t))
# overwrite the predictions with the query points
tracks[i, queries_t, arange] = queries[i, :tracks.size(2), 1:]
# correct visibilities, the query points should be visible
visibilities[i, queries_t, arange] = True
tracks[:, :, :, 0] *= W / float(self.interp_shape[1])
tracks[:, :, :, 1] *= H / float(self.interp_shape[0])
return tracks, visibilities
def _compute_backward_tracks(self, video, queries, tracks, visibilities):
inv_video = video.flip(1).clone()
inv_queries = queries.clone()
inv_queries[:, :, 0] = inv_video.shape[1] - inv_queries[:, :, 0] - 1
inv_tracks, __, inv_visibilities, __ = self.model(
rgbs=inv_video, queries=inv_queries, iters=6
)
inv_tracks = inv_tracks.flip(1)
inv_visibilities = inv_visibilities.flip(1)
mask = tracks == 0
tracks[mask] = inv_tracks[mask]
visibilities[mask[:, :, :, 0]] = inv_visibilities[mask[:, :, :, 0]]
return tracks, visibilities
| co-tracker-main | cotracker/predictor.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| co-tracker-main | cotracker/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import io
import glob
import torch
import pickle
import numpy as np
import mediapy as media
from PIL import Image
from typing import Mapping, Tuple, Union
from cotracker.datasets.utils import CoTrackerData
DatasetElement = Mapping[str, Mapping[str, Union[np.ndarray, str]]]
def resize_video(video: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray:
"""Resize a video to output_size."""
# If you have a GPU, consider replacing this with a GPU-enabled resize op,
# such as a jitted jax.image.resize. It will make things faster.
return media.resize_video(video, output_size)
def sample_queries_first(
target_occluded: np.ndarray,
target_points: np.ndarray,
frames: np.ndarray,
) -> Mapping[str, np.ndarray]:
"""Package a set of frames and tracks for use in TAPNet evaluations.
Given a set of frames and tracks with no query points, use the first
visible point in each track as the query.
Args:
target_occluded: Boolean occlusion flag, of shape [n_tracks, n_frames],
where True indicates occluded.
target_points: Position, of shape [n_tracks, n_frames, 2], where each point
is [x,y] scaled between 0 and 1.
frames: Video tensor, of shape [n_frames, height, width, 3]. Scaled between
-1 and 1.
Returns:
A dict with the keys:
video: Video tensor of shape [1, n_frames, height, width, 3]
query_points: Query points of shape [1, n_queries, 3] where
each point is [t, y, x] scaled to the range [-1, 1]
target_points: Target points of shape [1, n_queries, n_frames, 2] where
each point is [x, y] scaled to the range [-1, 1]
"""
valid = np.sum(~target_occluded, axis=1) > 0
target_points = target_points[valid, :]
target_occluded = target_occluded[valid, :]
query_points = []
for i in range(target_points.shape[0]):
index = np.where(target_occluded[i] == 0)[0][0]
x, y = target_points[i, index, 0], target_points[i, index, 1]
query_points.append(np.array([index, y, x])) # [t, y, x]
query_points = np.stack(query_points, axis=0)
return {
"video": frames[np.newaxis, ...],
"query_points": query_points[np.newaxis, ...],
"target_points": target_points[np.newaxis, ...],
"occluded": target_occluded[np.newaxis, ...],
}
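# Minimal worked example (added, not part of the original file): with two tracks over
# three frames, each query is taken at the track's first unoccluded frame and packed
# as [t, y, x].
if __name__ == "__main__":
    occluded = np.array([[True, False, False],
                         [False, False, True]])
    points = np.array([[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]],
                       [[0.7, 0.8], [0.6, 0.5], [0.4, 0.3]]])
    frames = np.zeros((3, 8, 8, 3), dtype=np.float32)
    out = sample_queries_first(occluded, points, frames)
    print(out["query_points"])  # expected: [[[1. 0.4 0.3] [0. 0.8 0.7]]]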
def sample_queries_strided(
target_occluded: np.ndarray,
target_points: np.ndarray,
frames: np.ndarray,
query_stride: int = 5,
) -> Mapping[str, np.ndarray]:
"""Package a set of frames and tracks for use in TAPNet evaluations.
Given a set of frames and tracks with no query points, sample queries
strided every query_stride frames, ignoring points that are not visible
at the selected frames.
Args:
target_occluded: Boolean occlusion flag, of shape [n_tracks, n_frames],
where True indicates occluded.
target_points: Position, of shape [n_tracks, n_frames, 2], where each point
is [x,y] scaled between 0 and 1.
frames: Video tensor, of shape [n_frames, height, width, 3]. Scaled between
-1 and 1.
query_stride: When sampling query points, search for un-occluded points
every query_stride frames and convert each one into a query.
Returns:
A dict with the keys:
video: Video tensor of shape [1, n_frames, height, width, 3]. The video
has floats scaled to the range [-1, 1].
query_points: Query points of shape [1, n_queries, 3] where
each point is [t, y, x] scaled to the range [-1, 1].
target_points: Target points of shape [1, n_queries, n_frames, 2] where
each point is [x, y] scaled to the range [-1, 1].
trackgroup: Index of the original track that each query point was
sampled from. This is useful for visualization.
"""
tracks = []
occs = []
queries = []
trackgroups = []
total = 0
trackgroup = np.arange(target_occluded.shape[0])
for i in range(0, target_occluded.shape[1], query_stride):
mask = target_occluded[:, i] == 0
query = np.stack(
[
i * np.ones(target_occluded.shape[0:1]),
target_points[:, i, 1],
target_points[:, i, 0],
],
axis=-1,
)
queries.append(query[mask])
tracks.append(target_points[mask])
occs.append(target_occluded[mask])
trackgroups.append(trackgroup[mask])
total += np.array(np.sum(target_occluded[:, i] == 0))
return {
"video": frames[np.newaxis, ...],
"query_points": np.concatenate(queries, axis=0)[np.newaxis, ...],
"target_points": np.concatenate(tracks, axis=0)[np.newaxis, ...],
"occluded": np.concatenate(occs, axis=0)[np.newaxis, ...],
"trackgroup": np.concatenate(trackgroups, axis=0)[np.newaxis, ...],
}
class TapVidDataset(torch.utils.data.Dataset):
def __init__(
self,
data_root,
dataset_type="davis",
resize_to_256=True,
queried_first=True,
):
self.dataset_type = dataset_type
self.resize_to_256 = resize_to_256
self.queried_first = queried_first
if self.dataset_type == "kinetics":
all_paths = glob.glob(os.path.join(data_root, "*_of_0010.pkl"))
points_dataset = []
for pickle_path in all_paths:
with open(pickle_path, "rb") as f:
data = pickle.load(f)
points_dataset = points_dataset + data
self.points_dataset = points_dataset
else:
with open(data_root, "rb") as f:
self.points_dataset = pickle.load(f)
if self.dataset_type == "davis":
self.video_names = list(self.points_dataset.keys())
print("found %d unique videos in %s" % (len(self.points_dataset), data_root))
def __getitem__(self, index):
if self.dataset_type == "davis":
video_name = self.video_names[index]
else:
video_name = index
video = self.points_dataset[video_name]
frames = video["video"]
if isinstance(frames[0], bytes):
# TAP-Vid frames are stored as JPEG bytes rather than `np.ndarray`s.
def decode(frame):
byteio = io.BytesIO(frame)
img = Image.open(byteio)
return np.array(img)
frames = np.array([decode(frame) for frame in frames])
target_points = self.points_dataset[video_name]["points"]
if self.resize_to_256:
frames = resize_video(frames, [256, 256])
target_points *= np.array([256, 256])
else:
target_points *= np.array([frames.shape[2], frames.shape[1]])
T, H, W, C = frames.shape
N, T, D = target_points.shape
target_occ = self.points_dataset[video_name]["occluded"]
if self.queried_first:
converted = sample_queries_first(target_occ, target_points, frames)
else:
converted = sample_queries_strided(target_occ, target_points, frames)
assert converted["target_points"].shape[1] == converted["query_points"].shape[1]
trajs = (
torch.from_numpy(converted["target_points"])[0].permute(1, 0, 2).float()
) # T, N, D
rgbs = torch.from_numpy(frames).permute(0, 3, 1, 2).float()
segs = torch.ones(T, 1, H, W).float()
visibles = torch.logical_not(torch.from_numpy(converted["occluded"]))[
0
].permute(
1, 0
) # T, N
query_points = torch.from_numpy(converted["query_points"])[0]  # N, 3: [t, y, x]
return CoTrackerData(
rgbs,
segs,
trajs,
visibles,
seq_name=str(video_name),
query_points=query_points,
)
def __len__(self):
return len(self.points_dataset)
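# Illustrative usage (added, not part of the original file); the pickle path below is
# a placeholder assumption for a downloaded TAP-Vid DAVIS annotation file.
if __name__ == "__main__":
    tapvid_pkl = "./tapvid_davis/tapvid_davis.pkl"
    if os.path.exists(tapvid_pkl):
        ds = TapVidDataset(data_root=tapvid_pkl, dataset_type="davis")
        print("videos:", len(ds))
        first = ds[0]  # a CoTrackerData sample for the first video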
| co-tracker-main | cotracker/datasets/tap_vid_datasets.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import imageio
import numpy as np
from cotracker.datasets.utils import CoTrackerData
from torchvision.transforms import ColorJitter, GaussianBlur
from PIL import Image
import cv2
class CoTrackerDataset(torch.utils.data.Dataset):
def __init__(
self,
data_root,
crop_size=(384, 512),
seq_len=24,
traj_per_sample=768,
sample_vis_1st_frame=False,
use_augs=False,
):
super(CoTrackerDataset, self).__init__()
np.random.seed(0)
torch.manual_seed(0)
self.data_root = data_root
self.seq_len = seq_len
self.traj_per_sample = traj_per_sample
self.sample_vis_1st_frame = sample_vis_1st_frame
self.use_augs = use_augs
self.crop_size = crop_size
# photometric augmentation
self.photo_aug = ColorJitter(
brightness=0.2, contrast=0.2, saturation=0.2, hue=0.25 / 3.14
)
self.blur_aug = GaussianBlur(11, sigma=(0.1, 2.0))
self.blur_aug_prob = 0.25
self.color_aug_prob = 0.25
# occlusion augmentation
self.eraser_aug_prob = 0.5
self.eraser_bounds = [2, 100]
self.eraser_max = 10
# occlusion augmentation
self.replace_aug_prob = 0.5
self.replace_bounds = [2, 100]
self.replace_max = 10
# spatial augmentations
self.pad_bounds = [0, 100]
self.crop_size = crop_size
self.resize_lim = [0.25, 2.0] # sample resizes from here
self.resize_delta = 0.2
self.max_crop_offset = 50
self.do_flip = True
self.h_flip_prob = 0.5
self.v_flip_prob = 0.5
def getitem_helper(self, index):
raise NotImplementedError
def __getitem__(self, index):
gotit = False
sample, gotit = self.getitem_helper(index)
if not gotit:
print("warning: sampling failed")
# fake sample, so we can still collate
sample = CoTrackerData(
video=torch.zeros(
(self.seq_len, 3, self.crop_size[0], self.crop_size[1])
),
segmentation=torch.zeros(
(self.seq_len, 1, self.crop_size[0], self.crop_size[1])
),
trajectory=torch.zeros((self.seq_len, self.traj_per_sample, 2)),
visibility=torch.zeros((self.seq_len, self.traj_per_sample)),
valid=torch.zeros((self.seq_len, self.traj_per_sample)),
)
return sample, gotit
def add_photometric_augs(self, rgbs, trajs, visibles, eraser=True, replace=True):
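# Added annotation (not part of the original file): with probability eraser_aug_prob,
# paint mean-colored rectangles onto frames after the first; with probability
# replace_aug_prob, paste patches copied from a color-jittered copy of a random frame;
# then apply optional global color jitter and Gaussian blur. Trajectory points that
# fall inside an erased or replaced rectangle are marked occluded in `visibles`.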
T, N, _ = trajs.shape
S = len(rgbs)
H, W = rgbs[0].shape[:2]
assert S == T
if eraser:
############ eraser transform (per image after the first) ############
rgbs = [rgb.astype(np.float32) for rgb in rgbs]
for i in range(1, S):
if np.random.rand() < self.eraser_aug_prob:
for _ in range(
np.random.randint(1, self.eraser_max + 1)
): # number of times to occlude
xc = np.random.randint(0, W)
yc = np.random.randint(0, H)
dx = np.random.randint(
self.eraser_bounds[0], self.eraser_bounds[1]
)
dy = np.random.randint(
self.eraser_bounds[0], self.eraser_bounds[1]
)
x0 = np.clip(xc - dx / 2, 0, W - 1).round().astype(np.int32)
x1 = np.clip(xc + dx / 2, 0, W - 1).round().astype(np.int32)
y0 = np.clip(yc - dy / 2, 0, H - 1).round().astype(np.int32)
y1 = np.clip(yc + dy / 2, 0, H - 1).round().astype(np.int32)
mean_color = np.mean(
rgbs[i][y0:y1, x0:x1, :].reshape(-1, 3), axis=0
)
rgbs[i][y0:y1, x0:x1, :] = mean_color
occ_inds = np.logical_and(
np.logical_and(trajs[i, :, 0] >= x0, trajs[i, :, 0] < x1),
np.logical_and(trajs[i, :, 1] >= y0, trajs[i, :, 1] < y1),
)
visibles[i, occ_inds] = 0
rgbs = [rgb.astype(np.uint8) for rgb in rgbs]
if replace:
rgbs_alt = [
np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8)
for rgb in rgbs
]
rgbs_alt = [
np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8)
for rgb in rgbs_alt
]
############ replace transform (per image after the first) ############
rgbs = [rgb.astype(np.float32) for rgb in rgbs]
rgbs_alt = [rgb.astype(np.float32) for rgb in rgbs_alt]
for i in range(1, S):
if np.random.rand() < self.replace_aug_prob:
for _ in range(
np.random.randint(1, self.replace_max + 1)
): # number of times to occlude
xc = np.random.randint(0, W)
yc = np.random.randint(0, H)
dx = np.random.randint(
self.replace_bounds[0], self.replace_bounds[1]
)
dy = np.random.randint(
self.replace_bounds[0], self.replace_bounds[1]
)
x0 = np.clip(xc - dx / 2, 0, W - 1).round().astype(np.int32)
x1 = np.clip(xc + dx / 2, 0, W - 1).round().astype(np.int32)
y0 = np.clip(yc - dy / 2, 0, H - 1).round().astype(np.int32)
y1 = np.clip(yc + dy / 2, 0, H - 1).round().astype(np.int32)
wid = x1 - x0
hei = y1 - y0
y00 = np.random.randint(0, H - hei)
x00 = np.random.randint(0, W - wid)
fr = np.random.randint(0, S)
rep = rgbs_alt[fr][y00 : y00 + hei, x00 : x00 + wid, :]
rgbs[i][y0:y1, x0:x1, :] = rep
occ_inds = np.logical_and(
np.logical_and(trajs[i, :, 0] >= x0, trajs[i, :, 0] < x1),
np.logical_and(trajs[i, :, 1] >= y0, trajs[i, :, 1] < y1),
)
visibles[i, occ_inds] = 0
rgbs = [rgb.astype(np.uint8) for rgb in rgbs]
############ photometric augmentation ############
if np.random.rand() < self.color_aug_prob:
# random per-frame amount of aug
rgbs = [
np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8)
for rgb in rgbs
]
if np.random.rand() < self.blur_aug_prob:
# random per-frame amount of blur
rgbs = [
np.array(self.blur_aug(Image.fromarray(rgb)), dtype=np.uint8)
for rgb in rgbs
]
return rgbs, trajs, visibles
def add_spatial_augs(self, rgbs, trajs, visibles):
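# Added annotation (not part of the original file): pad each frame by random margins,
# rescale with per-frame jittered but temporally smoothed scale factors, crop a
# crop_size window centered near the mean of the first frame's visible points with a
# slowly drifting offset, and optionally flip horizontally/vertically; trajectories
# are shifted, scaled and flipped alongside the pixels.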
T, N, __ = trajs.shape
S = len(rgbs)
H, W = rgbs[0].shape[:2]
assert S == T
rgbs = [rgb.astype(np.float32) for rgb in rgbs]
############ spatial transform ############
# padding
pad_x0 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1])
pad_x1 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1])
pad_y0 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1])
pad_y1 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1])
rgbs = [
np.pad(rgb, ((pad_y0, pad_y1), (pad_x0, pad_x1), (0, 0))) for rgb in rgbs
]
trajs[:, :, 0] += pad_x0
trajs[:, :, 1] += pad_y0
H, W = rgbs[0].shape[:2]
# scaling + stretching
scale = np.random.uniform(self.resize_lim[0], self.resize_lim[1])
scale_x = scale
scale_y = scale
H_new = H
W_new = W
scale_delta_x = 0.0
scale_delta_y = 0.0
rgbs_scaled = []
for s in range(S):
if s == 1:
scale_delta_x = np.random.uniform(-self.resize_delta, self.resize_delta)
scale_delta_y = np.random.uniform(-self.resize_delta, self.resize_delta)
elif s > 1:
scale_delta_x = (
scale_delta_x * 0.8
+ np.random.uniform(-self.resize_delta, self.resize_delta) * 0.2
)
scale_delta_y = (
scale_delta_y * 0.8
+ np.random.uniform(-self.resize_delta, self.resize_delta) * 0.2
)
scale_x = scale_x + scale_delta_x
scale_y = scale_y + scale_delta_y
# bring h/w closer
scale_xy = (scale_x + scale_y) * 0.5
scale_x = scale_x * 0.5 + scale_xy * 0.5
scale_y = scale_y * 0.5 + scale_xy * 0.5
# don't get too crazy
scale_x = np.clip(scale_x, 0.2, 2.0)
scale_y = np.clip(scale_y, 0.2, 2.0)
H_new = int(H * scale_y)
W_new = int(W * scale_x)
# make it at least slightly bigger than the crop area,
# so that the random cropping can add diversity
H_new = np.clip(H_new, self.crop_size[0] + 10, None)
W_new = np.clip(W_new, self.crop_size[1] + 10, None)
# recompute scale in case we clipped
scale_x = W_new / float(W)
scale_y = H_new / float(H)
rgbs_scaled.append(
cv2.resize(rgbs[s], (W_new, H_new), interpolation=cv2.INTER_LINEAR)
)
trajs[s, :, 0] *= scale_x
trajs[s, :, 1] *= scale_y
rgbs = rgbs_scaled
ok_inds = visibles[0, :] > 0
vis_trajs = trajs[:, ok_inds] # S,?,2
if vis_trajs.shape[1] > 0:
mid_x = np.mean(vis_trajs[0, :, 0])
mid_y = np.mean(vis_trajs[0, :, 1])
else:
mid_y = self.crop_size[0]
mid_x = self.crop_size[1]
x0 = int(mid_x - self.crop_size[1] // 2)
y0 = int(mid_y - self.crop_size[0] // 2)
offset_x = 0
offset_y = 0
for s in range(S):
# on each frame, shift a bit more
if s == 1:
offset_x = np.random.randint(
-self.max_crop_offset, self.max_crop_offset
)
offset_y = np.random.randint(
-self.max_crop_offset, self.max_crop_offset
)
elif s > 1:
offset_x = int(
offset_x * 0.8
+ np.random.randint(-self.max_crop_offset, self.max_crop_offset + 1)
* 0.2
)
offset_y = int(
offset_y * 0.8
+ np.random.randint(-self.max_crop_offset, self.max_crop_offset + 1)
* 0.2
)
x0 = x0 + offset_x
y0 = y0 + offset_y
H_new, W_new = rgbs[s].shape[:2]
if H_new == self.crop_size[0]:
y0 = 0
else:
y0 = min(max(0, y0), H_new - self.crop_size[0] - 1)
if W_new == self.crop_size[1]:
x0 = 0
else:
x0 = min(max(0, x0), W_new - self.crop_size[1] - 1)
rgbs[s] = rgbs[s][y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
trajs[s, :, 0] -= x0
trajs[s, :, 1] -= y0
H_new = self.crop_size[0]
W_new = self.crop_size[1]
# flip
h_flipped = False
v_flipped = False
if self.do_flip:
# h flip
if np.random.rand() < self.h_flip_prob:
h_flipped = True
rgbs = [rgb[:, ::-1] for rgb in rgbs]
# v flip
if np.random.rand() < self.v_flip_prob:
v_flipped = True
rgbs = [rgb[::-1] for rgb in rgbs]
if h_flipped:
trajs[:, :, 0] = W_new - trajs[:, :, 0]
if v_flipped:
trajs[:, :, 1] = H_new - trajs[:, :, 1]
return rgbs, trajs
def crop(self, rgbs, trajs):
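# Added annotation (not part of the original file): sample a single random crop_size
# window and apply it to every frame, shifting the trajectories by the crop offset.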
T, N, _ = trajs.shape
S = len(rgbs)
H, W = rgbs[0].shape[:2]
assert S == T
############ spatial transform ############
H_new = H
W_new = W
# simple random crop
y0 = (
0
if self.crop_size[0] >= H_new
else np.random.randint(0, H_new - self.crop_size[0])
)
x0 = (
0
if self.crop_size[1] >= W_new
else np.random.randint(0, W_new - self.crop_size[1])
)
rgbs = [
rgb[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
for rgb in rgbs
]
trajs[:, :, 0] -= x0
trajs[:, :, 1] -= y0
return rgbs, trajs
class KubricMovifDataset(CoTrackerDataset):
def __init__(
self,
data_root,
crop_size=(384, 512),
seq_len=24,
traj_per_sample=768,
sample_vis_1st_frame=False,
use_augs=False,
):
super(KubricMovifDataset, self).__init__(
data_root=data_root,
crop_size=crop_size,
seq_len=seq_len,
traj_per_sample=traj_per_sample,
sample_vis_1st_frame=sample_vis_1st_frame,
use_augs=use_augs,
)
self.pad_bounds = [0, 25]
self.resize_lim = [0.75, 1.25] # sample resizes from here
self.resize_delta = 0.05
self.max_crop_offset = 15
self.seq_names = [
fname
for fname in os.listdir(data_root)
if os.path.isdir(os.path.join(data_root, fname))
]
print("found %d unique videos in %s" % (len(self.seq_names), self.data_root))
def getitem_helper(self, index):
gotit = True
seq_name = self.seq_names[index]
npy_path = os.path.join(self.data_root, seq_name, seq_name + ".npy")
rgb_path = os.path.join(self.data_root, seq_name, "frames")
img_paths = sorted(os.listdir(rgb_path))
rgbs = []
for i, img_path in enumerate(img_paths):
rgbs.append(imageio.v2.imread(os.path.join(rgb_path, img_path)))
rgbs = np.stack(rgbs)
annot_dict = np.load(npy_path, allow_pickle=True).item()
traj_2d = annot_dict["coords"]
visibility = annot_dict["visibility"]
# random crop
assert self.seq_len <= len(rgbs)
if self.seq_len < len(rgbs):
start_ind = np.random.choice(len(rgbs) - self.seq_len, 1)[0]
rgbs = rgbs[start_ind : start_ind + self.seq_len]
traj_2d = traj_2d[:, start_ind : start_ind + self.seq_len]
visibility = visibility[:, start_ind : start_ind + self.seq_len]
traj_2d = np.transpose(traj_2d, (1, 0, 2))
visibility = np.transpose(np.logical_not(visibility), (1, 0))
if self.use_augs:
rgbs, traj_2d, visibility = self.add_photometric_augs(
rgbs, traj_2d, visibility
)
rgbs, traj_2d = self.add_spatial_augs(rgbs, traj_2d, visibility)
else:
rgbs, traj_2d = self.crop(rgbs, traj_2d)
visibility[traj_2d[:, :, 0] > self.crop_size[1] - 1] = False
visibility[traj_2d[:, :, 0] < 0] = False
visibility[traj_2d[:, :, 1] > self.crop_size[0] - 1] = False
visibility[traj_2d[:, :, 1] < 0] = False
visibility = torch.from_numpy(visibility)
traj_2d = torch.from_numpy(traj_2d)
visibile_pts_first_frame_inds = (visibility[0]).nonzero(as_tuple=False)[:, 0]
if self.sample_vis_1st_frame:
visibile_pts_inds = visibile_pts_first_frame_inds
else:
visibile_pts_mid_frame_inds = (visibility[self.seq_len // 2]).nonzero(
as_tuple=False
)[:, 0]
visibile_pts_inds = torch.cat(
(visibile_pts_first_frame_inds, visibile_pts_mid_frame_inds), dim=0
)
point_inds = torch.randperm(len(visibile_pts_inds))[: self.traj_per_sample]
if len(point_inds) < self.traj_per_sample:
gotit = False
visible_inds_sampled = visibile_pts_inds[point_inds]
trajs = traj_2d[:, visible_inds_sampled].float()
visibles = visibility[:, visible_inds_sampled]
valids = torch.ones((self.seq_len, self.traj_per_sample))
rgbs = torch.from_numpy(np.stack(rgbs)).permute(0, 3, 1, 2).float()
segs = torch.ones((self.seq_len, 1, self.crop_size[0], self.crop_size[1]))
sample = CoTrackerData(
video=rgbs,
segmentation=segs,
trajectory=trajs,
visibility=visibles,
valid=valids,
seq_name=seq_name,
)
return sample, gotit
def __len__(self):
return len(self.seq_names)
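# Illustrative usage (added, not part of the original file); the data_root below is a
# placeholder assumption for an extracted Kubric MOVi-F directory.
if __name__ == "__main__":
    kubric_root = "./kubric_movi_f"
    if os.path.isdir(kubric_root):
        ds = KubricMovifDataset(data_root=kubric_root, seq_len=24, traj_per_sample=768, use_augs=True)
        sample, gotit = ds[0]
        print("sequences:", len(ds), "valid first sample:", bool(gotit))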
| co-tracker-main | cotracker/datasets/kubric_movif_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
import os
import json
import imageio
import cv2
from enum import Enum
from cotracker.datasets.utils import CoTrackerData, resize_sample
IGNORE_ANIMALS = [
# "bear.json",
# "camel.json",
"cat_jump.json"
# "cows.json",
# "dog.json",
# "dog-agility.json",
# "horsejump-high.json",
# "horsejump-low.json",
# "impala0.json",
# "rs_dog.json"
"tiger.json"
]
class SMALJointCatalog(Enum):
# body_0 = 0
# body_1 = 1
# body_2 = 2
# body_3 = 3
# body_4 = 4
# body_5 = 5
# body_6 = 6
# upper_right_0 = 7
upper_right_1 = 8
upper_right_2 = 9
upper_right_3 = 10
# upper_left_0 = 11
upper_left_1 = 12
upper_left_2 = 13
upper_left_3 = 14
neck_lower = 15
# neck_upper = 16
# lower_right_0 = 17
lower_right_1 = 18
lower_right_2 = 19
lower_right_3 = 20
# lower_left_0 = 21
lower_left_1 = 22
lower_left_2 = 23
lower_left_3 = 24
tail_0 = 25
# tail_1 = 26
# tail_2 = 27
tail_3 = 28
# tail_4 = 29
# tail_5 = 30
tail_6 = 31
jaw = 32
nose = 33 # ADDED JOINT FOR VERTEX 1863
# chin = 34 # ADDED JOINT FOR VERTEX 26
right_ear = 35 # ADDED JOINT FOR VERTEX 149
left_ear = 36 # ADDED JOINT FOR VERTEX 2124
class SMALJointInfo:
def __init__(self):
# These are the SMAL joint classes that carry annotations in BADJA.
self.annotated_classes = np.array(
[
8,
9,
10, # upper_right
12,
13,
14, # upper_left
15, # neck
18,
19,
20, # lower_right
22,
23,
24, # lower_left
25,
28,
31, # tail
32,
33, # head
35, # right_ear
36,
]
) # left_ear
self.annotated_markers = np.array(
[
cv2.MARKER_CROSS,
cv2.MARKER_STAR,
cv2.MARKER_TRIANGLE_DOWN,
cv2.MARKER_CROSS,
cv2.MARKER_STAR,
cv2.MARKER_TRIANGLE_DOWN,
cv2.MARKER_CROSS,
cv2.MARKER_CROSS,
cv2.MARKER_STAR,
cv2.MARKER_TRIANGLE_DOWN,
cv2.MARKER_CROSS,
cv2.MARKER_STAR,
cv2.MARKER_TRIANGLE_DOWN,
cv2.MARKER_CROSS,
cv2.MARKER_STAR,
cv2.MARKER_TRIANGLE_DOWN,
cv2.MARKER_CROSS,
cv2.MARKER_STAR,
cv2.MARKER_CROSS,
cv2.MARKER_CROSS,
]
)
self.joint_regions = np.array(
[
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
2,
2,
2,
2,
3,
3,
4,
4,
4,
4,
5,
5,
5,
5,
6,
6,
6,
6,
6,
6,
6,
7,
7,
7,
8,
9,
]
)
self.annotated_joint_region = self.joint_regions[self.annotated_classes]
self.region_colors = np.array(
[
[250, 190, 190], # body, light pink
[60, 180, 75], # upper_right, green
[230, 25, 75], # upper_left, red
[128, 0, 0], # neck, maroon
[0, 130, 200], # lower_right, blue
[255, 255, 25], # lower_left, yellow
[240, 50, 230], # tail, magenta
[245, 130, 48], # jaw / nose / chin, orange
[29, 98, 115], # right_ear, turquoise
[255, 153, 204],
]
) # left_ear, pink
self.joint_colors = np.array(self.region_colors)[self.annotated_joint_region]
class BADJAData:
def __init__(self, data_root, complete=False):
annotations_path = os.path.join(data_root, "joint_annotations")
self.animal_dict = {}
self.animal_count = 0
self.smal_joint_info = SMALJointInfo()
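# Added annotation (not part of the original file): for each non-ignored animal json,
# walk every DAVIS frame between the first and last annotated frames; frames with an
# annotation keep their joints/visibility arrays, the rest get None so that each
# sequence stays temporally dense.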
for __, animal_json in enumerate(sorted(os.listdir(annotations_path))):
if animal_json not in IGNORE_ANIMALS:
json_path = os.path.join(annotations_path, animal_json)
with open(json_path) as json_data:
animal_joint_data = json.load(json_data)
filenames = []
segnames = []
joints = []
visible = []
first_path = animal_joint_data[0]["segmentation_path"]
last_path = animal_joint_data[-1]["segmentation_path"]
first_frame = first_path.split("/")[-1]
last_frame = last_path.split("/")[-1]
if not "extra_videos" in first_path:
animal = first_path.split("/")[-2]
first_frame_int = int(first_frame.split(".")[0])
last_frame_int = int(last_frame.split(".")[0])
for fr in range(first_frame_int, last_frame_int + 1):
ref_file_name = os.path.join(
data_root,
"DAVIS/JPEGImages/Full-Resolution/%s/%05d.jpg"
% (animal, fr),
)
ref_seg_name = os.path.join(
data_root,
"DAVIS/Annotations/Full-Resolution/%s/%05d.png"
% (animal, fr),
)
foundit = False
for ind, image_annotation in enumerate(animal_joint_data):
file_name = os.path.join(
data_root, image_annotation["image_path"]
)
seg_name = os.path.join(
data_root, image_annotation["segmentation_path"]
)
if file_name == ref_file_name:
foundit = True
label_ind = ind
if foundit:
image_annotation = animal_joint_data[label_ind]
file_name = os.path.join(
data_root, image_annotation["image_path"]
)
seg_name = os.path.join(
data_root, image_annotation["segmentation_path"]
)
joint = np.array(image_annotation["joints"])
vis = np.array(image_annotation["visibility"])
else:
file_name = ref_file_name
seg_name = ref_seg_name
joint = None
vis = None
filenames.append(file_name)
segnames.append(seg_name)
joints.append(joint)
visible.append(vis)
if len(filenames):
self.animal_dict[self.animal_count] = (
filenames,
segnames,
joints,
visible,
)
self.animal_count += 1
print("Loaded BADJA dataset")
def get_loader(self):
for __ in range(int(1e6)):
animal_id = np.random.choice(len(self.animal_dict.keys()))
filenames, segnames, joints, visible = self.animal_dict[animal_id]
image_id = np.random.randint(0, len(filenames))
seg_file = segnames[image_id]
image_file = filenames[image_id]
joints = joints[image_id].copy()
joints = joints[self.smal_joint_info.annotated_classes]
visible = visible[image_id][self.smal_joint_info.annotated_classes]
rgb_img = imageio.imread(image_file) # , mode='RGB')
sil_img = imageio.imread(seg_file) # , mode='RGB')
rgb_h, rgb_w, _ = rgb_img.shape
sil_img = cv2.resize(sil_img, (rgb_w, rgb_h), interpolation=cv2.INTER_NEAREST)
yield rgb_img, sil_img, joints, visible, image_file
def get_video(self, animal_id):
filenames, segnames, joint, visible = self.animal_dict[animal_id]
rgbs = []
segs = []
joints = []
visibles = []
for s in range(len(filenames)):
image_file = filenames[s]
rgb_img = imageio.imread(image_file) # , mode='RGB')
rgb_h, rgb_w, _ = rgb_img.shape
seg_file = segnames[s]
sil_img = imageio.imread(seg_file) # , mode='RGB')
sil_img = cv2.resize(sil_img, (rgb_w, rgb_h), interpolation=cv2.INTER_NEAREST)
jo = joint[s]
if jo is not None:
joi = joint[s].copy()
joi = joi[self.smal_joint_info.annotated_classes]
vis = visible[s][self.smal_joint_info.annotated_classes]
else:
joi = None
vis = None
rgbs.append(rgb_img)
segs.append(sil_img)
joints.append(joi)
visibles.append(vis)
return rgbs, segs, joints, visibles, filenames[0]
class BadjaDataset(torch.utils.data.Dataset):
def __init__(
self, data_root, max_seq_len=1000, dataset_resolution=(384, 512)
):
self.data_root = data_root
self.badja_data = BADJAData(data_root)
self.max_seq_len = max_seq_len
self.dataset_resolution = dataset_resolution
print(
"found %d unique videos in %s"
% (self.badja_data.animal_count, self.data_root)
)
def __getitem__(self, index):
rgbs, segs, joints, visibles, filename = self.badja_data.get_video(index)
S = len(rgbs)
H, W, __ = rgbs[0].shape
H, W, __ = segs[0].shape
N, __ = joints[0].shape
# let's eliminate the Nones
# note the first one is guaranteed present
for s in range(1, S):
if joints[s] is None:
joints[s] = np.zeros_like(joints[0])
visibles[s] = np.zeros_like(visibles[0])
# eliminate the mystery dim
segs = [seg[:, :, 0] for seg in segs]
rgbs = np.stack(rgbs, 0)
segs = np.stack(segs, 0)
trajs = np.stack(joints, 0)
visibles = np.stack(visibles, 0)
rgbs = torch.from_numpy(rgbs).reshape(S, H, W, 3).permute(0, 3, 1, 2).float()
segs = torch.from_numpy(segs).reshape(S, 1, H, W).float()
trajs = torch.from_numpy(trajs).reshape(S, N, 2).float()
visibles = torch.from_numpy(visibles).reshape(S, N)
rgbs = rgbs[: self.max_seq_len]
segs = segs[: self.max_seq_len]
trajs = trajs[: self.max_seq_len]
visibles = visibles[: self.max_seq_len]
# apparently the coords are in yx order
trajs = torch.flip(trajs, [2])
if "extra_videos" in filename:
seq_name = filename.split("/")[-3]
else:
seq_name = filename.split("/")[-2]
rgbs, trajs, segs = resize_sample(rgbs, trajs, segs, self.dataset_resolution)
return CoTrackerData(rgbs, segs, trajs, visibles, seq_name=seq_name)
def __len__(self):
return self.badja_data.animal_count
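# Illustrative usage (added, not part of the original file); data_root is a
# placeholder assumption pointing at an extracted BADJA + DAVIS directory.
if __name__ == "__main__":
    badja_root = "./badja_data"
    if os.path.isdir(badja_root):
        ds = BadjaDataset(data_root=badja_root, max_seq_len=1000, dataset_resolution=(384, 512))
        print("animals:", len(ds))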
| co-tracker-main | cotracker/datasets/badja_dataset.py |