Dataset Viewer

python_code (string, 0–187k chars) | repo_name (string, 8–46 chars) | file_path (string, 6–135 chars)
---|---|---|
# encoding: utf-8
# the following try/except block will make the custom check compatible with any Agent version
try:
# first, try to import the base class from old versions of the Agent...
from checks import AgentCheck
except ImportError:
# ...if the above failed, the check is running in Agent version 6 or later
from datadog_checks.checks import AgentCheck
# psutil
import psutil
# pynvml
import pynvml
__version__ = '0.1.5'
__author__ = 'Takashi NAGAI, Alejandro Ferrari'
# These values are not exposed through Pynvml, but are reported in the throttle
# reasons. The reason values are defined in
# https://github.com/NVIDIA/gpu-monitoring-tools/blob/master/bindings/go/nvml/nvml.h
# Indicates that the clocks are throttled because the GPU is part of a sync
# boost group, which syncs the clocks to the minimum clocks across the group.
# Corresponds to nvmlClocksThrottleReasonSyncBoost.
GPU_THROTTLE_SYNCBOOST = 0x10
# Indicates that the GPU core or memory temperature is above max. Corresponds to
# nvmlClocksThrottleReasonSwThermalSlowdown.
GPU_THROTTLE_THERMAL_SLOWDOWN_SOFTWARE = 0x20
# Indicates that the clocks are throttled by 50% or more due to very high
# temperature. Corresponds to nvmlClocksThrottleReasonHwThermalSlowdown.
GPU_THROTTLE_THERMAL_SLOWDOWN_HARDWARE = 0x40
# Indicates that the clocks are throttled by 50% or more by the power supply.
# Corresponds to nvmlClocksThrottleReasonHwPowerBrakeSlowdown.
GPU_THROTTLE_POWER_BRAKE_SLOWDOWN_HARDWARE = 0x80
# Indicates that the clocks are throttled by the current display settings.
# Corresponds to nvmlClocksThrottleReasonDisplayClockSetting.
GPU_THROTTLE_DISPLAY_CLOCKS_SETTINGS = 0x100
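# Worked example (illustrative): a reasons bitmask of 0x90 has the sync-boost (0x10)
# and hardware power-brake (0x80) bits set, so
#   (0x90 & GPU_THROTTLE_SYNCBOOST) / GPU_THROTTLE_SYNCBOOST == 1
#   (0x90 & GPU_THROTTLE_DISPLAY_CLOCKS_SETTINGS) / GPU_THROTTLE_DISPLAY_CLOCKS_SETTINGS == 0
# which is exactly how the per-reason throttle gauges in check() below end up as 0/1 values.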
class NvmlCheck(AgentCheck):
def _dict2list(self, tags={}):
return ["{k}:{v}".format(k=k, v=v) for k, v in list(tags.items())]
def check(self, instance):
pynvml.nvmlInit()
msg_list = []
try:
deviceCount = pynvml.nvmlDeviceGetCount()
except pynvml.NVMLError:
deviceCount = 0
# Number of active GPUs
self.gauge('nvml.gpus.number', deviceCount)
for device_id in range(deviceCount):
handle = pynvml.nvmlDeviceGetHandleByIndex(device_id)
name = pynvml.nvmlDeviceGetName(handle)
tags = dict(name="{}-{}".format(name, device_id))
d_tags = self._dict2list(tags)
# temperature info
try:
temp = pynvml.nvmlDeviceGetTemperature(
handle, pynvml.NVML_TEMPERATURE_GPU)
self.gauge('nvml.temp.', temp, tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append('nvmlDeviceGetTemperature:{}'.format(err))
# power info
try:
pwr = pynvml.nvmlDeviceGetPowerUsage(handle) // 1000
self.gauge('nvml.power.', pwr, tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append('nvmlDeviceGetPowerUsage:{}'.format(err))
# fan info
try:
fan = pynvml.nvmlDeviceGetFanSpeed(handle)
self.gauge('nvml.fan.', fan, tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append('nvmlDeviceGetFanSpeed:{}'.format(err))
# memory info
try:
mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
self.gauge('nvml.mem.total', mem.total, tags=d_tags)
self.gauge('nvml.mem.used', mem.used, tags=d_tags)
self.gauge('nvml.mem.free', mem.free, tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append('nvmlDeviceGetMemoryInfo:{}'.format(err))
# utilization GPU/Memory info
try:
util_rate = pynvml.nvmlDeviceGetUtilizationRates(handle)
self.gauge('nvml.util.gpu', util_rate.gpu, tags=d_tags)
self.gauge('nvml.util.memory', util_rate.memory, tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append(
'nvmlDeviceGetUtilizationRates:{}'.format(err))
# utilization Encoder info
try:
util_encoder = pynvml.nvmlDeviceGetEncoderUtilization(handle)
self.log.debug('nvml.util.encoder %s' % int(util_encoder[0]))
self.gauge('nvml.util.encoder', int(
util_encoder[0]), tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append(
'nvmlDeviceGetEncoderUtilization:{}'.format(err))
# utilization Decoder info
try:
util_decoder = pynvml.nvmlDeviceGetDecoderUtilization(handle)
self.log.debug('nvml.util.decoder %s' % int(util_decoder[0]))
self.gauge('nvml.util.decoder', int(
util_decoder[0]), tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append(
'nvmlDeviceGetDecoderUtilization:{}'.format(err))
# Compute running processes
try:
cps = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
for ps in cps:
p_tags = tags.copy()
p_tags['pid'] = ps.pid
p_tags['name'] = pynvml.nvmlSystemGetProcessName(ps.pid)
p_tags = self._dict2list(p_tags)
self.gauge('nvml.process.used_gpu_memory',
ps.usedGpuMemory, tags=p_tags)
except pynvml.NVMLError as err:
msg_list.append(
'nvmlDeviceGetComputeRunningProcesses:{}'.format(err))
# Clocks throttling info
# Divide by the mask so that the value is either 0 or 1 per GPU
try:
throttle_reasons = (
pynvml.nvmlDeviceGetCurrentClocksThrottleReasons(handle))
self.gauge('nvml.throttle.appsettings', (throttle_reasons &
pynvml.nvmlClocksThrottleReasonApplicationsClocksSetting) /
pynvml.nvmlClocksThrottleReasonApplicationsClocksSetting,
tags=d_tags)
self.gauge('nvml.throttle.display', (throttle_reasons &
GPU_THROTTLE_DISPLAY_CLOCKS_SETTINGS) /
GPU_THROTTLE_DISPLAY_CLOCKS_SETTINGS,
tags=d_tags)
self.gauge('nvml.throttle.hardware', (throttle_reasons &
pynvml.nvmlClocksThrottleReasonHwSlowdown) /
pynvml.nvmlClocksThrottleReasonHwSlowdown,
tags=d_tags)
self.gauge('nvml.throttle.idle', (throttle_reasons &
pynvml.nvmlClocksThrottleReasonGpuIdle) /
pynvml.nvmlClocksThrottleReasonGpuIdle,
tags=d_tags)
self.gauge('nvml.throttle.power.hardware', (throttle_reasons &
GPU_THROTTLE_POWER_BRAKE_SLOWDOWN_HARDWARE) /
GPU_THROTTLE_POWER_BRAKE_SLOWDOWN_HARDWARE,
tags=d_tags)
self.gauge('nvml.throttle.power.software', (throttle_reasons &
pynvml.nvmlClocksThrottleReasonSwPowerCap) /
pynvml.nvmlClocksThrottleReasonSwPowerCap,
tags=d_tags)
self.gauge('nvml.throttle.syncboost', (throttle_reasons &
GPU_THROTTLE_SYNCBOOST) / GPU_THROTTLE_SYNCBOOST,
tags=d_tags)
self.gauge('nvml.throttle.temp.hardware', (throttle_reasons &
GPU_THROTTLE_THERMAL_SLOWDOWN_HARDWARE) /
GPU_THROTTLE_THERMAL_SLOWDOWN_HARDWARE,
tags=d_tags)
self.gauge('nvml.throttle.temp.software', (throttle_reasons &
GPU_THROTTLE_THERMAL_SLOWDOWN_SOFTWARE) /
GPU_THROTTLE_THERMAL_SLOWDOWN_SOFTWARE,
tags=d_tags)
self.gauge('nvml.throttle.unknown', (throttle_reasons &
pynvml.nvmlClocksThrottleReasonUnknown) /
pynvml.nvmlClocksThrottleReasonUnknown,
tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append(
'nvmlDeviceGetCurrentClocksThrottleReasons:{}'.format(err))
if msg_list:
status = AgentCheck.CRITICAL
msg = ','.join(msg_list)
else:
status = AgentCheck.OK
msg = 'Ok'
pynvml.nvmlShutdown()
self.service_check('nvml.check', status, message=msg)
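# Illustrative standalone sketch (not part of the Datadog check itself): the same pynvml
# calls can be exercised directly for debugging, assuming pynvml and an NVIDIA driver
# are available on the host.
if __name__ == '__main__':
    pynvml.nvmlInit()
    try:
        for i in range(pynvml.nvmlDeviceGetCount()):
            handle = pynvml.nvmlDeviceGetHandleByIndex(i)
            util = pynvml.nvmlDeviceGetUtilizationRates(handle)
            print(i, pynvml.nvmlDeviceGetName(handle), util.gpu, util.memory)
    finally:
        pynvml.nvmlShutdown()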
| datadog_nvml-master | nvml.py |
"""Manages constants required by multiple files."""
import getpass
import os
import platform
import socket
from pathlib import Path
def make_scene_name(type, num):
if type == "":
return "FloorPlan" + str(num) + "_physics"
elif num < 10:
return "FloorPlan" + type + "0" + str(num) + "_physics"
else:
return "FloorPlan" + type + str(num) + "_physics"
SCENES_NAMES_SPLIT_BY_TYPE = [
[make_scene_name(j, i) for i in range(1, 31)] for j in ["", "2", "3", "4"]
]
SCENE_NAMES = sum(SCENES_NAMES_SPLIT_BY_TYPE, [])
TRAIN_SCENE_NAMES = [
SCENES_NAMES_SPLIT_BY_TYPE[j][i] for j in range(3) for i in range(20)
]
VALID_SCENE_NAMES = [
SCENES_NAMES_SPLIT_BY_TYPE[j][i] for j in range(3) for i in range(20, 25)
]
TEST_SCENE_NAMES = [
SCENES_NAMES_SPLIT_BY_TYPE[j][i] for j in range(3) for i in range(25, 30)
] + SCENES_NAMES_SPLIT_BY_TYPE[3]
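# For reference: in AI2-THOR, FloorPlan1-30 are kitchens, FloorPlan201-230 living rooms,
# FloorPlan301-330 bedrooms, and FloorPlan401-430 bathrooms. The splits above therefore
# train on scenes 1-20 and validate on scenes 21-25 of the first three room types, and
# test on scenes 26-30 of those types plus every bathroom scene.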
VISIBILITY_DISTANCE = 1.25
FOV = 90.0
PICK_UP_HEIGHT = 1.0
UNREACHABLE_SYM = 0
REACHABLE_SYM = 1
AGENT_SYM = 2
NO_INFO_SYM = 3
GOAL_OBJ_SYM = 4
VISITED_SYM = 5
AGENT_SELF_0 = 6
AGENT_SELF_90 = 7
AGENT_SELF_180 = 8
AGENT_SELF_270 = 9
AGENT_OTHER_0 = 10
AGENT_OTHER_90 = 11
AGENT_OTHER_180 = 12
AGENT_OTHER_270 = 13
# Distance that qualifies an episode as easy or medium
# For more details see `create_table` method in
# `rl_multi_agent/analysis/summarize_furnlift_eval_results.py`.
# array([ 2., 11., 17., 45.]) == [0, 1/3, 2/3, 1] percentiles of initial manhat distances
EASY_MAX = 10
MED_MAX = 17
def rotate_tuple_90_clockwise(t):
return (t[1], -t[0])
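# e.g. rotate_tuple_90_clockwise((0.25, 0.5)) == (0.5, -0.25); applied repeatedly below
# to derive the 90/180/270 degree occupancy footprints from the 0 degree ones.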
TELEVISION_ROT_0_POINTS = (
[(0.25 * i, 0.25) for i in [-1, 0, 1]]
+ [(0.25 * i, 0) for i in [-2, -1, 0, 1, 2]]
+ [(0.25 * i, -0.25) for i in [-2, -1, 0, 1, 2]]
)
TELEVISION_ROT_90_POINTS = [
rotate_tuple_90_clockwise(t) for t in TELEVISION_ROT_0_POINTS
]
TELEVISION_ROT_180_POINTS = [
rotate_tuple_90_clockwise(t) for t in TELEVISION_ROT_90_POINTS
]
TELEVISION_ROT_270_POINTS = [
rotate_tuple_90_clockwise(t) for t in TELEVISION_ROT_180_POINTS
]
TELEVISION_ROTATION_TO_OCCUPATIONS = {
0: TELEVISION_ROT_0_POINTS,
90: TELEVISION_ROT_90_POINTS,
180: TELEVISION_ROT_180_POINTS,
270: TELEVISION_ROT_270_POINTS,
}
TV_STAND_ROT_0_POINTS = [
(0.25 * i, 0.25 * j) for i in range(-2, 3) for j in range(-1, 2)
]
TV_STAND_ROT_90_POINTS = [
(0.25 * i, 0.25 * j) for i in range(-1, 2) for j in range(-2, 3)
]
TV_STAND_ROT_180_POINTS = TV_STAND_ROT_0_POINTS
TV_STAND_ROT_270_POINTS = TV_STAND_ROT_90_POINTS
TV_STAND_ROTATION_TO_OCCUPATIONS = {
0: TV_STAND_ROT_0_POINTS,
90: TV_STAND_ROT_90_POINTS,
180: TV_STAND_ROT_180_POINTS,
270: TV_STAND_ROT_270_POINTS,
}
DRESSER_SILHOUETTE_STRING = """1 1 1 1 1
1 1 1 1 1
1 1 1 1 1
"""
SPLIT_TO_USE_FOR_EVALUATION = "test"
HOST_NAME = socket.gethostname()
USERNAME = getpass.getuser()
PLATFORM = platform.system()
PROJECT_TOP_DIR = os.path.dirname(Path(__file__))
if PLATFORM == "Linux":
ABS_PATH_TO_LOCAL_THOR_BUILD = os.path.join(
PROJECT_TOP_DIR, "ai2thor_builds/thor-2-3-2020-Linux64"
)
elif PLATFORM == "Darwin":
ABS_PATH_TO_LOCAL_THOR_BUILD = os.path.join(
PROJECT_TOP_DIR,
"ai2thor_builds/thor-2-3-2020-OSXIntel64.app/Contents/MacOS/thor-2-3-2020-OSXIntel64",
)
else:
raise NotImplementedError(
(
"Your platform, {}, is not currently supported. "
"Supported platforms include Linux and MacOSX."
).format(PLATFORM)
)
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS = os.path.join(
PROJECT_TOP_DIR, "trained_models", "final_furnmove_ckpts"
)
ABS_PATH_TO_FINAL_FURNLIFT_CKPTS = os.path.join(
PROJECT_TOP_DIR, "trained_models", "final_furnlift_ckpts"
)
ABS_PATH_TO_ANALYSIS_RESULTS_DIR = os.path.join(PROJECT_TOP_DIR, "analysis_output")
ABS_PATH_TO_DATA_DIR = os.path.join(PROJECT_TOP_DIR, "data")
| cordial-sync-master | constants.py |
"""Defines the tasks that an agent should complete in a given environment."""
from abc import abstractmethod
from typing import (
Dict,
Any,
Tuple,
TypeVar,
Generic,
List,
Optional,
Callable,
)
EnvType = TypeVar("EnvType")
class Episode(Generic[EnvType]):
def __init__(
self, env: EnvType, task_data: Dict[str, Any], max_steps: int, **kwargs
) -> None:
self._env = env
self.task_data = task_data
self._num_steps_taken_in_episode = 0
self.max_steps = max_steps
@property
def environment(self) -> EnvType:
return self._env
@abstractmethod
def state_for_agent(self):
raise NotImplementedError()
def step(self, action_as_int: int) -> Dict[str, Any]:
assert not self.is_paused() and not self.is_complete()
self._increment_num_steps_taken_in_episode()
return self._step(action_as_int=action_as_int)
@abstractmethod
def _step(self, action_as_int: int) -> Dict[str, Any]:
raise NotImplementedError()
def reached_max_steps(self) -> bool:
return self.num_steps_taken_in_episode() >= self.max_steps
@abstractmethod
def reached_terminal_state(self) -> bool:
raise NotImplementedError()
def is_complete(self) -> bool:
return self.reached_terminal_state() or self.reached_max_steps()
def is_paused(self) -> bool:
return False
def _increment_num_steps_taken_in_episode(self) -> None:
self._num_steps_taken_in_episode += 1
def num_steps_taken_in_episode(self) -> int:
return self._num_steps_taken_in_episode
def info(self):
return {"ep_length": self.num_steps_taken_in_episode()}
@property
def available_actions(self) -> Tuple[str, ...]:
return tuple(a for g in self.available_action_groups for a in g)
@property
def available_action_groups(self) -> Tuple[Tuple[str, ...], ...]:
return self.class_available_action_groups()
@classmethod
def class_available_actions(cls, **kwargs) -> Tuple[str, ...]:
return tuple(a for g in cls.class_available_action_groups(**kwargs) for a in g)
@classmethod
@abstractmethod
def class_available_action_groups(cls, **kwargs) -> Tuple[Tuple[str, ...], ...]:
raise NotImplementedError()
@property
def total_available_actions(self) -> int:
return len(self.available_actions)
def index_to_action(self, index: int) -> str:
assert 0 <= index < self.total_available_actions
return self.available_actions[index]
class MultiAgentEpisode(Generic[EnvType]):
def __init__(
self,
env: EnvType,
task_data: Dict[str, Any],
max_steps: int,
before_step_function: Optional[Callable] = None,
after_step_function: Optional[Callable] = None,
**kwargs,
) -> None:
self._env = env
self.task_data = task_data
self._num_steps_taken_in_episode = 0
self.max_steps = max_steps
self.before_step_function = before_step_function
self.after_step_function = after_step_function
@property
def environment(self) -> EnvType:
return self._env
@abstractmethod
def states_for_agents(self):
raise NotImplementedError()
def multi_step(self, actions_as_ints: Tuple[int, ...]) -> List[Dict[str, Any]]:
assert not self.is_paused() and not self.is_complete()
step_results = []
before_info = (
None
if self.before_step_function is None
else self.before_step_function(episode=self)
)
for i in range(self._env.num_agents):
self._increment_num_steps_taken_in_episode()
step_results.append(self._step(actions_as_ints[i], agent_id=i))
if self.after_step_function is not None:
self.after_step_function(
step_results=step_results, before_info=before_info, episode=self
)
return step_results
@abstractmethod
def _step(self, action_as_int: int, agent_id: int) -> Dict[str, Any]:
raise NotImplementedError()
def reached_max_steps(self) -> bool:
return self.num_steps_taken_in_episode() >= self.max_steps
@abstractmethod
def reached_terminal_state(self) -> bool:
raise NotImplementedError()
def is_complete(self) -> bool:
return self.reached_terminal_state() or self.reached_max_steps()
def is_paused(self) -> bool:
return False
def _increment_num_steps_taken_in_episode(self) -> None:
self._num_steps_taken_in_episode += 1
def num_steps_taken_in_episode(self) -> int:
return self._num_steps_taken_in_episode
def info(self):
return {"ep_length": self.num_steps_taken_in_episode()}
@property
def available_actions(self) -> Tuple[str, ...]:
return tuple(a for g in self.available_action_groups for a in g)
@property
def available_action_groups(self) -> Tuple[Tuple[str, ...], ...]:
return self.class_available_action_groups()
@classmethod
def class_available_actions(cls, **kwargs) -> Tuple[str, ...]:
return tuple(a for g in cls.class_available_action_groups(**kwargs) for a in g)
@classmethod
@abstractmethod
def class_available_action_groups(cls, **kwargs) -> Tuple[Tuple[str, ...], ...]:
raise NotImplementedError()
@property
def total_available_actions(self) -> int:
return len(self.available_actions)
def index_to_action(self, index: int) -> str:
assert 0 <= index < self.total_available_actions
return self.available_actions[index]
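# Minimal illustrative subclass (not part of the original codebase) sketching the pieces a
# concrete Episode must provide; the environment type, actions, and reward used here are
# placeholders only.
class _ExampleEpisode(Episode[Dict[str, Any]]):
    def state_for_agent(self):
        # A real episode would derive an observation from the environment here.
        return self._env

    def _step(self, action_as_int: int) -> Dict[str, Any]:
        return {"action": self.index_to_action(action_as_int), "reward": 0.0}

    def reached_terminal_state(self) -> bool:
        return False

    @classmethod
    def class_available_action_groups(cls, **kwargs) -> Tuple[Tuple[str, ...], ...]:
        return (("MoveAhead", "RotateLeft", "RotateRight", "Pass"),)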
| cordial-sync-master | rl_base/episode.py |
from .agent import A3CAgent
from .episode import Episode
from .shared_optim import SharedAdam, SharedRMSprop
| cordial-sync-master | rl_base/__init__.py |
from __future__ import division
import copy
from abc import ABC
from typing import Dict, List, Union, Optional, Any, TypeVar, Generic
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import net_util
from utils.misc_util import huber_loss
from .episode import Episode
EnvType = TypeVar("EnvType")
class RLAgent(Generic[EnvType]):
def __init__(
self,
model: Optional[nn.Module] = None,
episode: Optional[Episode[EnvType]] = None,
gpu_id: int = -1,
**kwargs,
) -> None:
# Important that gpu_id is set first as the model setter requires it
self.gpu_id = gpu_id
self._model: Optional[nn.Module] = None
if model is not None:
self.model = model
self._episode = episode
self.hidden: Optional[Any] = None
self.global_episode_count = 0
def sync_with_shared(self, shared_model: nn.Module) -> None:
if self.gpu_id >= 0:
with torch.cuda.device(self.gpu_id):
self.model.load_state_dict(shared_model.state_dict())
else:
self.model.load_state_dict(shared_model.state_dict())
def eval_at_state(self, state, hidden, **kwargs):
raise NotImplementedError()
def eval_at_current_state(self):
return self.eval_at_state(state=self.state, hidden=self.hidden)
@property
def episode(self) -> Optional[Episode[EnvType]]:
return self._episode
@episode.setter
def episode(self, value: Episode[EnvType]) -> None:
self.reset_hidden()
self.clear_history()
self._episode = value
@property
def environment(self) -> Optional[EnvType]:
if self.episode is None:
return None
return self.episode.environment
@property
def state(self):
raise NotImplementedError()
@property
def model(self) -> nn.Module:
assert self._model is not None
return self._model
@model.setter
def model(self, model_to_set: nn.Module):
if self.gpu_id >= 0:
with torch.cuda.device(self.gpu_id):
self._model = model_to_set.cuda()
else:
self._model = model_to_set
def gpuify(self, tensor: torch.Tensor) -> Union[torch.Tensor]:
if self.gpu_id >= 0:
with torch.cuda.device(self.gpu_id):
tensor = tensor.cuda()
return tensor
def act(self, **kwargs) -> Dict:
raise NotImplementedError()
def reset_hidden(self) -> None:
self.hidden = None
def repackage_hidden(self) -> None:
self.hidden = net_util.recursively_detach(self.hidden)
def preprocess_action(
self, last_action: Optional[int]
) -> Optional[Union[torch.LongTensor, torch.cuda.LongTensor]]:
if last_action is not None:
# noinspection PyTypeChecker
return self.gpuify(torch.LongTensor([[last_action]]))
return None
def preprocess_frame(
self, frame: np.ndarray, new_frame_size: int
) -> Union[torch.FloatTensor, torch.cuda.FloatTensor]:
frame = net_util.resnet_input_transform(frame, new_frame_size)
# noinspection PyArgumentList
return self.gpuify(torch.FloatTensor(frame))
def preprocess_long_tensor_frame(
self, frame: np.ndarray
) -> Union[torch.LongTensor, torch.cuda.LongTensor]:
agent_local_tensor = torch.LongTensor(frame)
if len(agent_local_tensor.shape) == 2:
agent_local_tensor = agent_local_tensor.unsqueeze(0)
return self.gpuify(agent_local_tensor)
def preprocess_depth_frame(
self, depth_frame: np.ndarray, new_frame_size: int
) -> Union[torch.FloatTensor, torch.cuda.FloatTensor]:
depth_frame = net_util.resize_image(
torch.FloatTensor(np.expand_dims(depth_frame / 5000.0, axis=0)),
new_frame_size,
)
return self.gpuify(depth_frame)
def clear_history(self) -> None:
raise NotImplementedError()
def clear_graph_data(self):
raise NotImplementedError()
def loss(self, **kwargs) -> Dict[str, torch.FloatTensor]:
raise NotImplementedError()
class A3CAgent(RLAgent[EnvType], ABC):
def __init__(
self,
model: Optional[nn.Module] = None,
episode: Optional[Episode[EnvType]] = None,
gamma: float = 0.99,
tau: float = 1.0,
beta: float = 1e-2,
gpu_id: int = -1,
include_test_eval_results: bool = False,
huber_delta: Optional[float] = None,
**kwargs,
) -> None:
super().__init__(model=model, episode=episode, gpu_id=gpu_id, **kwargs)
self.gamma = gamma
self.tau = tau
self.beta = beta
self.include_test_eval_results = include_test_eval_results
self.huber_delta = huber_delta
self.last_reward: Optional[float] = None
self.values: List[torch.FloatTensor] = []
self.log_prob_of_actions: List[torch.FloatTensor] = []
self.rewards: List[float] = []
self.entropies: List[torch.FloatTensor] = []
self.actions: List[int] = []
self.step_results: List[Dict[str, Any]] = []
self.eval_results: List[Dict[str, Any]] = []
self._a3c_loss_disabled_for_episode: bool = False
def eval_at_state(self, state, hidden, **kwargs):
raise NotImplementedError()
def disable_a3c_loss_for_episode(self, mode: bool = True):
self._a3c_loss_disabled_for_episode = mode
@property
def episode(self):
return super(A3CAgent, self).episode
@episode.setter
def episode(self, value: Episode[EnvType]) -> None:
# Properties cannot be assigned through super(); invoke the parent class's setter directly.
RLAgent.episode.fset(self, value)
self.disable_a3c_loss_for_episode(False)
def act(
self,
train: bool = True,
action: Optional[int] = None,
sample_action: bool = True,
**kwargs,
) -> Dict:
if not self.model.training and train:
self.model.train()
if self.model.training and not train:
self.model.eval()
assert self.episode is not None
assert not self.episode.is_complete()
if not train and self.hidden is not None:
self.repackage_hidden()
eval = self.eval_at_current_state()
value = eval["value"]
self.hidden = None if "hidden" not in eval else eval["hidden"]
logit = eval["logit"]
probs = F.softmax(logit, dim=1)
log_probs = F.log_softmax(logit, dim=1)
if hasattr(self.model, "action_groups"):
entropy_list = []
k = 0
marginal_probs = []
for action_group in self.model.action_groups:
marginal_probs.append(
probs[:, k : (k + len(action_group))].sum(1).unsqueeze(1)
)
group_probs = F.softmax(logit[:, k : (k + len(action_group))], dim=1)
group_log_probs = F.log_softmax(
logit[:, k : (k + len(action_group))], dim=1
)
entropy_list.append(-(group_log_probs * group_probs).sum(1))
k += len(action_group)
entropy = sum(entropy_list) / len(entropy_list)
marginal_probs = torch.cat(tuple(marginal_probs), dim=1)
entropy += -(marginal_probs * torch.log(marginal_probs)).sum(1)
else:
entropy = -(log_probs * probs).sum().unsqueeze(0)
if action is None:
if sample_action:
action = probs.multinomial(num_samples=1).item()
else:
action = probs.argmax(dim=-1, keepdim=True).item()
assert log_probs.shape[0] == 1
log_prob_of_action = log_probs.view(-1)[action]
before_location = self.environment.get_agent_location()
step_result = self.episode.step(action)
after_location = self.environment.get_agent_location()
self.last_reward = (
None if step_result["reward"] is None else float(step_result["reward"])
)
step_result["before_location"] = before_location
step_result["after_location"] = after_location
eval["probs"] = probs
eval["action"] = action
eval["step_result"] = step_result
eval["training"] = train
self.step_results.append(step_result)
self.actions.append(action)
self.rewards.append(self.last_reward)
if train or self.include_test_eval_results:
self.eval_results.append(eval)
if train:
self.entropies.append(entropy)
self.values.append(value)
self.log_prob_of_actions.append(log_prob_of_action)
return eval
def clear_history(self) -> None:
self.clear_graph_data()
self.rewards = []
self.actions = []
self.step_results = []
def clear_graph_data(self):
self.values = []
self.log_prob_of_actions = []
self.entropies = []
self.eval_results = []
def loss(self, **kwargs) -> Dict[str, torch.FloatTensor]:
assert self.episode is not None
if self._a3c_loss_disabled_for_episode:
return {}
if self.episode.is_complete():
future_reward_est = 0.0
else:
eval = self.eval_at_current_state()
future_reward_est = eval["value"].item()
return self.a3c_loss(
values=self.values,
rewards=self.rewards,
log_prob_of_actions=self.log_prob_of_actions,
entropies=self.entropies,
future_reward_est=future_reward_est,
gamma=self.gamma,
tau=self.tau,
beta=self.beta,
gpu_id=self.gpu_id,
)
@staticmethod
def a3c_loss(
values: List[torch.FloatTensor],
rewards: List[float],
log_prob_of_actions: List[torch.FloatTensor],
entropies: List[torch.FloatTensor],
future_reward_est: float,
gamma: float,
tau: float,
beta: float,
gpu_id: int,
huber_delta: Optional[float] = None,
):
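# A short summary of the math implemented below (descriptive only): the loop computes
# delta_t = r_t + gamma * V(s_{t+1}) - V(s_t) and the Generalized Advantage Estimate
# gae_t = delta_t + gamma * tau * gae_{t+1}; the policy term weights -log pi(a_t|s_t)
# by gae_t, the value term regresses V(s_t) toward the discounted return R_t, and the
# entropy term (scaled by beta) encourages exploration.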
R = torch.FloatTensor([[future_reward_est]])
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
R = R.cuda()
values = copy.copy(values)
values.append(R)
policy_loss = 0
value_loss = 0
entropy_loss = 0
gae = torch.zeros(1, 1)
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
gae = gae.cuda()
for i in reversed(range(len(rewards))):
R = gamma * R + rewards[i]
advantage = R - values[i]
value_loss = value_loss + (
0.5 * advantage.pow(2)
if huber_delta is None
else 0.5 * huber_loss(advantage, huber_delta)
)
# Generalized Advantage Estimation
delta_t = rewards[i] + gamma * values[i + 1].detach() - values[i].detach()
gae = gae * gamma * tau + delta_t
policy_loss = policy_loss - log_prob_of_actions[i] * gae
entropy_loss -= beta * entropies[i]
return {
"policy": policy_loss,
"value": 0.5 * value_loss,
"entropy": entropy_loss,
}
| cordial-sync-master | rl_base/agent.py |
from __future__ import division
import math
from collections import defaultdict
from typing import Tuple, Dict, Union, Optional, Iterable, Callable
import torch
import torch.optim as optim
from torch import nn
class SharedRMSprop(optim.Optimizer):
"""Implements RMSprop algorithm with shared states.
"""
def __init__(
self,
params: Union[Iterable[nn.Parameter], Dict],
lr: float = 7e-4,
alpha: float = 0.99,
eps: float = 0.1,
weight_decay: float = 0,
momentum: float = 0,
centered: bool = False,
saved_state: Dict = None,
) -> None:
defaults: defaultdict = defaultdict(
lr=lr,
alpha=alpha,
eps=eps,
weight_decay=weight_decay,
momentum=momentum,
centered=centered,
)
super(SharedRMSprop, self).__init__(params, defaults)
if saved_state:
self.load_state_dict(saved_state)
for group in self.param_groups:
for p in group["params"]:
if p.requires_grad:
state = self.state[p]
state["step"] = torch.zeros(1)
state["grad_avg"] = p.data.new().resize_as_(p.data).zero_()
state["square_avg"] = p.data.new().resize_as_(p.data).zero_()
state["momentum_buffer"] = p.data.new().resize_as_(p.data).zero_()
def share_memory(self) -> None:
for group in self.param_groups:
for p in group["params"]:
state = self.state[p]
state["square_avg"].share_memory_()
state["step"].share_memory_()
state["grad_avg"].share_memory_()
state["momentum_buffer"].share_memory_()
def step(self, closure: Optional[Callable] = None) -> Optional[torch.FloatTensor]:
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("RMSprop does not support sparse gradients")
state = self.state[p]
square_avg = state["square_avg"]
alpha = group["alpha"]
state["step"] += 1
if group["weight_decay"] != 0:
grad = grad.add(group["weight_decay"], p.data)
square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)
if group["centered"]:
grad_avg = state["grad_avg"]
grad_avg.mul_(alpha).add_(1 - alpha, grad)
avg = (
square_avg.addcmul(-1, grad_avg, grad_avg)
.sqrt()
.add_(group["eps"])
)
else:
avg = square_avg.sqrt().add_(group["eps"])
if group["momentum"] > 0:
buf = state["momentum_buffer"]
buf.mul_(group["momentum"]).addcdiv_(grad, avg)
p.data.add_(-group["lr"], buf)
else:
p.data.addcdiv_(-group["lr"], grad, avg)
return loss
class SharedAdam(optim.Optimizer):
"""Implements Adam algorithm with shared states.
"""
def __init__(
self,
params: Union[Iterable[nn.Parameter], Dict],
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-3,
weight_decay: float = 0,
amsgrad: bool = False,
saved_state: Dict = None,
) -> None:
defaults: defaultdict = defaultdict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad
)
super(SharedAdam, self).__init__(params, defaults)
if saved_state:
self.load_state_dict(saved_state)
for group in self.param_groups:
for p in group["params"]:
if p.requires_grad:
state = self.state[p]
state["step"] = torch.zeros(1)
state["exp_avg"] = p.data.new().resize_as_(p.data).zero_()
state["exp_avg_sq"] = p.data.new().resize_as_(p.data).zero_()
state["max_exp_avg_sq"] = p.data.new().resize_as_(p.data).zero_()
def share_memory(self) -> None:
for group in self.param_groups:
for p in group["params"]:
state = self.state[p]
state["step"].share_memory_()
state["exp_avg"].share_memory_()
state["exp_avg_sq"].share_memory_()
state["max_exp_avg_sq"].share_memory_()
def step(self, closure: Optional[Callable] = None) -> Optional[torch.FloatTensor]:
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group["amsgrad"]
state = self.state[p]
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
if group["weight_decay"] != 0:
grad = grad.add(group["weight_decay"], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till
# now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group["eps"])
else:
denom = exp_avg_sq.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"][0]
bias_correction2 = 1 - beta2 ** state["step"][0]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
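# Typical usage sketch (assumptions: an nn.Module whose parameters live in shared memory
# for A3C-style training with torch.multiprocessing; "MyModel" is a placeholder):
#
#     shared_model = MyModel()
#     shared_model.share_memory()
#     optimizer = SharedAdam(shared_model.parameters(), lr=1e-3, amsgrad=True)
#     optimizer.share_memory()  # expose step counts and moment buffers to all workers
#
# Each worker process then copies its gradients onto shared_model's parameters and calls
# optimizer.step(), exactly as with any torch.optim optimizer.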
| cordial-sync-master | rl_base/shared_optim.py |
import queue
import random
import warnings
from collections import Counter
from typing import List, Callable, Optional, Dict
import torch
from torch import multiprocessing as mp
from rl_ai2thor.ai2thor_environment import AI2ThorEnvironment
from rl_ai2thor.ai2thor_gridworld_environment import AI2ThorLiftedObjectGridEnvironment
from rl_base.agent import RLAgent
from rl_multi_agent import MultiAgent
from rl_multi_agent.furnlift_episode_samplers import save_talk_reply_data_frame
from rl_multi_agent.furnmove_episodes import FurnMoveEpisode
from rl_multi_agent.multi_agent_utils import TrainingCompleteException
class FurnMoveEpisodeSampler(object):
def __init__(
self,
scenes: List[str],
num_agents: int,
object_type: str,
to_object_type: str,
env_args=None,
max_episode_length: int = 500,
episode_class: Callable = FurnMoveEpisode,
player_screen_height=224,
player_screen_width=224,
save_talk_reply_probs_path: Optional[str] = None,
max_ep_using_expert_actions: int = 10000,
visible_agents: bool = True,
include_depth_frame: bool = False,
object_initial_height: float = 1.0,
max_distance_from_object: float = 0.5,
headless: bool = False,
episode_args: Optional[Dict] = None,
environment_args: Optional[Dict] = None,
):
self.visible_agents = visible_agents
self.max_ep_using_expert_actions = max_ep_using_expert_actions
self.save_talk_reply_probs_path = save_talk_reply_probs_path
self.player_screen_height = player_screen_height
self.player_screen_width = player_screen_width
self.episode_class = episode_class
self.max_episode_length = max_episode_length
self.env_args = env_args
self.num_agents = num_agents
self.scenes = scenes
self.object_type = object_type
self.to_object_type = to_object_type
self.include_depth_frame = include_depth_frame
self.object_initial_height = object_initial_height
self.max_distance_from_object = max_distance_from_object
self.headless = headless
self.episode_args = episode_args if episode_args is not None else {}
self.environment_args = environment_args if environment_args is not None else {}
self.grid_size = 0.25
self._current_train_episode = 0
self._internal_episodes = 0
self.episode_init_data = None
@staticmethod
def _create_environment(
num_agents,
env_args,
visible_agents: bool,
render_depth_image: bool,
headless: bool,
**environment_args,
) -> AI2ThorEnvironment:
env = AI2ThorEnvironment(
docker_enabled=env_args.docker_enabled,
x_display=env_args.x_display,
local_thor_build=env_args.local_thor_build,
num_agents=num_agents,
restrict_to_initially_reachable_points=True,
visible_agents=visible_agents,
render_depth_image=render_depth_image,
time_scale=1.0,
headless=headless,
**environment_args,
)
return env
@property
def current_train_episode(self):
return self._current_train_episode
@current_train_episode.setter
def current_train_episode(self, value):
self._current_train_episode = value
def start_env(self, agent: RLAgent, env: Optional[AI2ThorEnvironment]):
if env is None:
if agent.environment is not None:
env = agent.environment
else:
env = self._create_environment(
num_agents=self.num_agents,
env_args=self.env_args,
visible_agents=self.visible_agents,
render_depth_image=self.include_depth_frame,
headless=self.headless,
**self.environment_args,
)
env.start(
"FloorPlan1_physics",
move_mag=self.grid_size,
quality="Very Low",
player_screen_height=self.player_screen_height,
player_screen_width=self.player_screen_width,
)
return env
def __call__(
self,
agent: MultiAgent,
agent_location_seed=None,
env: Optional[AI2ThorEnvironment] = None,
episode_init_queue: Optional[mp.Queue] = None,
) -> None:
self._internal_episodes += 1
if (
self.max_ep_using_expert_actions != 0
and self.current_train_episode <= self.max_ep_using_expert_actions
):
agent.take_expert_action_prob = 0.9 * (
1.0 - self.current_train_episode / self.max_ep_using_expert_actions
)
else:
agent.take_expert_action_prob = 0
env = self.start_env(agent=agent, env=env)
self.episode_init_data = None
if episode_init_queue is not None:
try:
self.episode_init_data = episode_init_queue.get(timeout=1)
except queue.Empty:
raise TrainingCompleteException("No more data in episode init queue.")
scene = self.episode_init_data["scene"]
env.reset(scene)
agent_locations = self.episode_init_data["agent_locations"]
for i in range(self.num_agents):
env.teleport_agent_to(
**agent_locations[i], standing=True, force_action=True, agent_id=i
)
# Place agents in appropriate locations
env.step(
{
"action": "DisableAllObjectsOfType",
"objectId": self.object_type,
"agentId": 0,
}
)
lifted_object_loc = self.episode_init_data["lifted_object_location"]
sr = env.step(
{
"action": "CreateObjectAtLocation",
"objectType": self.object_type,
"position": {
"x": lifted_object_loc["x"],
"y": lifted_object_loc["y"],
"z": lifted_object_loc["z"],
},
"rotation": {"x": 0, "y": lifted_object_loc["rotation"], "z": 0},
"forceAction": True,
"agentId": 0,
}
)
assert env.last_event.metadata["lastActionSuccess"]
objects_of_type = env.all_objects_with_properties(
{"objectType": self.object_type}, agent_id=0
)
if len(objects_of_type) != 1:
print("len(objects_of_type): {}".format(len(objects_of_type)))
raise (Exception("len(objects_of_type) != 1"))
object = objects_of_type[0]
object_id = object["objectId"]
# Create the object we should navigate to
to_object_loc = self.episode_init_data["to_object_location"]
env.step(
{
"action": "CreateObjectAtLocation",
"objectType": self.to_object_type,
"position": {
"x": to_object_loc["x"],
"y": to_object_loc["y"],
"z": to_object_loc["z"],
},
"rotation": {"x": 0, "y": to_object_loc["rotation"], "z": 0},
"forceAction": True,
"agentId": 0,
}
)
assert env.last_event.metadata["lastActionSuccess"]
objects_of_to_type = [
o["objectId"]
for o in env.all_objects_with_properties(
{"objectType": self.to_object_type}, agent_id=0
)
if len(o["objectId"].split("|")) == 2
]
assert len(objects_of_to_type) == 1
to_object_id = objects_of_to_type[0]
assert to_object_id is not None and len(to_object_id) != 0
else:
scene = random.choice(self.scenes)
env.reset(scene)
for i in range(self.num_agents):
env.teleport_agent_to(
**env.last_event.metadata["agent"]["position"],
rotation=env.last_event.metadata["agent"]["rotation"]["y"],
horizon=30.0,
standing=True,
force_action=True,
agent_id=i,
)
scene_successfully_setup = False
failure_reasons = []
for _ in range(10):
# Randomly generate agents looking at the TV
env.step(
{
"action": "DisableAllObjectsOfType",
"objectId": self.object_type,
"agentId": 0,
}
)
sr = env.step(
{
"action": "RandomlyCreateLiftedFurniture",
"objectType": self.object_type,
"y": self.object_initial_height,
"z": self.max_distance_from_object,
}
)
if (
env.last_event.metadata["lastAction"]
!= "RandomlyCreateLiftedFurniture"
or not env.last_event.metadata["lastActionSuccess"]
):
failure_reasons.append(
"Could not randomize location of {} in {}. Error message {}.".format(
self.object_type,
scene,
env.last_event.metadata["errorMessage"],
)
)
continue
# Refreshing reachable here should not be done as it means the
# space underneath the TV would be considered unreachable even though,
# once the TV moves, it is.
# env.refresh_initially_reachable()
objects_of_type = env.all_objects_with_properties(
{"objectType": self.object_type}, agent_id=0
)
if len(objects_of_type) != 1:
print("len(objects_of_type): {}".format(len(objects_of_type)))
raise (Exception("len(objects_of_type) != 1"))
object = objects_of_type[0]
object_id = object["objectId"]
assert object_id == sr.metadata["actionReturn"]
# Create the object we should navigate to
env.step(
{
"action": "RandomlyCreateAndPlaceObjectOnFloor",
"objectType": self.to_object_type,
"agentId": 0,
}
)
if (
env.last_event.metadata["lastAction"]
!= "RandomlyCreateAndPlaceObjectOnFloor"
or not env.last_event.metadata["lastActionSuccess"]
):
failure_reasons.append(
"Could not randomize location of {} in {}. Error message: {}.".format(
self.to_object_type,
scene,
env.last_event.metadata["errorMessage"],
)
)
continue
to_object_id = env.last_event.metadata["actionReturn"]
assert to_object_id is not None and len(to_object_id) != 0
scene_successfully_setup = True
break
if not scene_successfully_setup:
# raise Exception(
warnings.warn(
(
"Failed to randomly initialize objects and agents in scene {} 10 times"
+ "for the following reasons:"
+ "\n\t* ".join(failure_reasons)
+ "\nTrying a new scene."
).format(env.scene_name)
)
yield from self(agent, env=env)
return
assert env.scene_name == scene
# Task data includes the target object id and one agent state/metadata to navigate to.
task_data = {"move_obj_id": object_id, "move_to_obj_id": to_object_id}
agent.episode = self.episode_class(
env,
task_data,
max_steps=self.max_episode_length,
num_agents=self.num_agents,
max_distance_from_object=self.max_distance_from_object,
include_depth_frame=self.include_depth_frame,
**self.episode_args,
)
yield True
if self.save_talk_reply_probs_path is not None:
if torch.cuda.is_available():
save_talk_reply_data_frame(
agent,
self.save_talk_reply_probs_path,
None,
use_hash=True, # self._internal_episodes,
)
else:
save_talk_reply_data_frame(
agent, self.save_talk_reply_probs_path, self._internal_episodes
)
class FurnMoveGridEpisodeSampler(object):
def __init__(
self,
scenes: List[str],
num_agents: int,
object_type: str,
to_object_type: str,
to_object_silhouette: str,
env_args=None,
max_episode_length: int = 500,
episode_class: Callable = FurnMoveEpisode,
player_screen_height=224,
player_screen_width=224,
save_talk_reply_probs_path: Optional[str] = None,
max_ep_using_expert_actions: int = 10000,
visible_agents: bool = True,
include_depth_frame: bool = False,
object_initial_height: float = 1.0,
max_distance_from_object: float = 0.5,
headless: bool = False,
episode_args: Optional[Dict] = None,
environment_args: Optional[Dict] = None,
):
self.visible_agents = visible_agents
self.max_ep_using_expert_actions = max_ep_using_expert_actions
self.save_talk_reply_probs_path = save_talk_reply_probs_path
self.player_screen_height = player_screen_height
self.player_screen_width = player_screen_width
self.episode_class = episode_class
self.max_episode_length = max_episode_length
self.env_args = env_args
self.num_agents = num_agents
self.scenes = scenes
self.object_type = object_type
self.to_object_type = to_object_type
self.include_depth_frame = include_depth_frame
self.object_initial_height = object_initial_height
self.max_distance_from_object = max_distance_from_object
self.headless = headless
self.episode_args = episode_args if episode_args is not None else {}
self.environment_args = environment_args if environment_args is not None else {}
self.to_object_silhouette = to_object_silhouette
self.grid_size = 0.25
self._current_train_episode = 0
self._internal_episodes = 0
self.episode_init_data = None
@staticmethod
def _create_environment(
num_agents,
env_args,
object_initial_height,
max_distance_from_object,
object_type,
visible_agents: bool,
render_depth_image: bool,
headless: bool,
**environment_args,
) -> AI2ThorLiftedObjectGridEnvironment:
# TODO: restrict_to_initially_reachable_points
# and render_depth_image are excluded from this.
env = AI2ThorLiftedObjectGridEnvironment(
docker_enabled=env_args.docker_enabled,
x_display=env_args.x_display,
local_thor_build=env_args.local_thor_build,
num_agents=num_agents,
visible_agents=visible_agents,
time_scale=1.0,
headless=headless,
lifted_object_height=object_initial_height,
max_dist_to_lifted_object=max_distance_from_object,
object_type=object_type,
**environment_args,
)
return env
@property
def current_train_episode(self):
return self._current_train_episode
@current_train_episode.setter
def current_train_episode(self, value):
self._current_train_episode = value
def start_env(self, agent: RLAgent, env: Optional[AI2ThorEnvironment]):
if agent.environment is not None:
env = agent.environment
else:
env = self._create_environment(
num_agents=self.num_agents,
env_args=self.env_args,
visible_agents=self.visible_agents,
render_depth_image=self.include_depth_frame,
headless=self.headless,
object_initial_height=self.object_initial_height,
max_distance_from_object=self.max_distance_from_object,
object_type=self.object_type,
**self.environment_args,
)
env.start(
"FloorPlan1_physics",
move_mag=self.grid_size,
quality="Very Low",
player_screen_height=self.player_screen_height,
player_screen_width=self.player_screen_width,
)
return env
def __call__(
self,
agent: MultiAgent,
agent_location_seed=None,
env: Optional[AI2ThorEnvironment] = None,
episode_init_queue: Optional[mp.Queue] = None,
) -> None:
self._internal_episodes += 1
if (
self.max_ep_using_expert_actions != 0
and self.current_train_episode <= self.max_ep_using_expert_actions
):
agent.take_expert_action_prob = 0.9 * (
1.0 - self.current_train_episode / self.max_ep_using_expert_actions
)
else:
agent.take_expert_action_prob = 0
env = self.start_env(agent=agent, env=env)
if episode_init_queue is not None:
try:
self.episode_init_data = episode_init_queue.get(timeout=1)
except queue.Empty:
raise TrainingCompleteException("No more data in episode init queue.")
scene = self.episode_init_data["scene"]
env.reset(scene)
agent_locations = self.episode_init_data["agent_locations"]
for i in range(self.num_agents):
loc = agent_locations[i]
env.step(
{
"action": "TeleportFull",
"agentId": i,
"x": loc["x"],
"z": loc["z"],
"rotation": {"y": loc["rotation"]},
"allowAgentIntersection": True,
"makeReachable": True,
}
)
assert env.last_event.metadata["lastActionSuccess"]
# Place agents in appropriate locations
lifted_object_loc = self.episode_init_data["lifted_object_location"]
env.step(
{
"action": "CreateLiftedFurnitureAtLocation",
"objectType": self.object_type,
"x": lifted_object_loc["x"],
"z": lifted_object_loc["z"],
"rotation": {"y": lifted_object_loc["rotation"]},
"agentId": 0,
"forceAction": True,
}
)
assert env.last_event.metadata["lastActionSuccess"]
object_id = env.last_event.metadata["actionReturn"]
# Create the object we should navigate to
to_object_loc = self.episode_init_data["to_object_location"]
env.step(
{
"action": "CreateAndPlaceObjectOnFloorAtLocation",
"objectType": "Dresser",
"object_mask": env.controller.parse_template_to_mask(
self.to_object_silhouette
),
**to_object_loc,
"rotation": {"y": to_object_loc["rotation"]},
"agentId": 0,
"forceAction": True,
}
)
assert env.last_event.metadata["lastActionSuccess"]
to_object_id = env.last_event.metadata["actionReturn"]["objectId"]
assert to_object_id is not None and len(to_object_id) != 0
else:
scene = random.choice(self.scenes)
env.reset(scene)
scene_successfully_setup = False
failure_reasons = []
for _ in range(10 if env.num_agents <= 3 else 50):
# Randomly generate agents looking at the TV
sr = env.step(
{
"action": "RandomlyCreateLiftedFurniture",
"agentId": 0,
"objectType": self.object_type,
"y": self.object_initial_height,
"z": self.max_distance_from_object,
}
)
if (
env.last_event.metadata["lastAction"]
!= "RandomlyCreateLiftedFurniture"
or not env.last_event.metadata["lastActionSuccess"]
):
failure_reasons.append(
"Could not randomize location of {} in {}.".format(
self.object_type, scene
)
)
continue
object_id = sr.metadata["actionReturn"]
# Create the object we should navigate to
env.step(
{
"action": "RandomlyCreateAndPlaceObjectOnFloor",
"objectType": self.to_object_type,
"object_mask": env.controller.parse_template_to_mask(
self.to_object_silhouette
),
"agentId": 0,
}
)
if (
env.last_event.metadata["lastAction"]
!= "RandomlyCreateAndPlaceObjectOnFloor"
or not env.last_event.metadata["lastActionSuccess"]
):
failure_reasons.append(
"Could not randomize location of {} in {}.".format(
self.to_object_type, scene
)
)
continue
to_object_id = env.last_event.metadata["actionReturn"]["objectId"]
assert to_object_id is not None and len(to_object_id) != 0
scene_successfully_setup = True
break
if not scene_successfully_setup:
# raise Exception(
warnings.warn(
(
"Failed to randomly initialize objects and agents in scene {} 10 times".format(
scene
)
+ "for the following reasons:"
+ "\n\t* ".join(
[
"({} times): {}".format(cnt, reason)
for reason, cnt in Counter(failure_reasons).items()
]
)
+ "\nTrying a new scene."
).format(env.scene_name)
)
yield from self(agent, env=env)
return
assert env.scene_name == scene
# Task data includes the target object id and one agent state/metadata to navigate to.
task_data = {"move_obj_id": object_id, "move_to_obj_id": to_object_id}
agent.episode = self.episode_class(
env,
task_data,
max_steps=self.max_episode_length,
num_agents=self.num_agents,
max_distance_from_object=self.max_distance_from_object,
include_depth_frame=self.include_depth_frame,
**self.episode_args,
)
yield True
if self.save_talk_reply_probs_path is not None:
if torch.cuda.is_available():
save_talk_reply_data_frame(
agent,
self.save_talk_reply_probs_path,
None,
use_hash=True, # self._internal_episodes,
)
else:
save_talk_reply_data_frame(
agent, self.save_talk_reply_probs_path, self._internal_episodes
)
| cordial-sync-master | rl_multi_agent/furnmove_episode_samplers.py |
from __future__ import division
from collections import OrderedDict
from typing import Tuple, Optional, Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.misc_util import norm_col_init, weights_init, outer_product, outer_sum
def _unfold_communications(speech_per_agent: torch.FloatTensor):
assert len(speech_per_agent.shape) >= 2
num_agents = speech_per_agent.shape[0]
unfolded_commns = [speech_per_agent[1:, :].view(1, -1)]
for i in range(1, num_agents - 1):
unfolded_commns.append(
torch.cat(
(
speech_per_agent[(i + 1) :,].view(1, -1),
speech_per_agent[:i,].view(1, -1),
),
dim=1,
)
)
unfolded_commns.append(speech_per_agent[:-1,].view(1, -1))
return torch.cat(unfolded_commns, dim=0)
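# Illustrative behaviour: with three agents and message rows [s0, s1, s2], the returned rows
# are [s1 s2] (for agent 0), [s2 s0] (for agent 1), and [s0 s1] (for agent 2), i.e. each agent
# receives the other agents' messages rotated so that its own message is excluded.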
class A3CLSTMCentralEgoGridsEmbedCNN(nn.Module):
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
occupancy_embed_length: int,
):
super(A3CLSTMCentralEgoGridsEmbedCNN, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.num_inputs_per_agent = num_inputs_per_agent
self.num_inputs = num_inputs_per_agent * num_agents
self.num_agents = num_agents
self.occupancy_embed_length = occupancy_embed_length
self.occupancy_embeddings = nn.Embedding(
num_inputs_per_agent, self.occupancy_embed_length
)
# input to conv is (num_agents * self.occupancy_embed_length, 15, 15)
self.cnn = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
self.occupancy_embed_length * self.num_agents,
32,
3,
padding=1,
),
),
("relu1", nn.ReLU(inplace=True)),
# shape = (15, 15)
("conv2", nn.Conv2d(32, 64, 3, stride=2)),
("relu2", nn.ReLU(inplace=True)),
# shape = (7, 7)
("conv3", nn.Conv2d(64, 128, 3, stride=2)),
("relu3", nn.ReLU(inplace=True)),
# shape = (3, 3)
("conv4", nn.Conv2d(128, 256, 3, stride=2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (1, 1); Stride doesn't matter above
]
)
)
# LSTM
self.lstm_in_dim = 256
self.lstm = nn.LSTM(self.lstm_in_dim, state_repr_length, batch_first=True)
# Post LSTM fully connected layers
self.after_lstm_mlp = nn.Sequential(
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
)
# Marginal Linear actor
self.marginal_actor_linear_list = nn.ModuleList(
[
nn.Linear(state_repr_length, self.num_outputs)
for _ in range(self.num_agents)
]
)
# Conditional actor
self.joint_actor_linear = nn.Linear(
state_repr_length, self.num_outputs ** self.num_agents
)
# Linear critic
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
for agent_id in range(self.num_agents):
self.marginal_actor_linear_list[agent_id].weight.data = norm_col_init(
self.marginal_actor_linear_list[agent_id].weight.data, 0.01
)
self.marginal_actor_linear_list[agent_id].bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 15, 15):
raise Exception("input to model is not as expected, check!")
inputs = inputs.float()
# inputs.shape == (self.num_agents, self.num_inputs, 15, 15)
inputs = inputs.permute(0, 2, 3, 1).contiguous()
original_shape = inputs.shape
# original_shape == (self.num_agents, 15, 15, self.num_inputs)
scaling_factor = torch.reciprocal(
(inputs == 1).sum(dim=3).unsqueeze(3).float() + 1e-3
)
# scaling_factor.shape == (self.num_agents, 15, 15, 1)
inputs = inputs.view(-1, self.num_inputs_per_agent)
inputs = inputs.matmul(self.occupancy_embeddings.weight)
inputs = inputs.view(
original_shape[0],
original_shape[1],
original_shape[2],
self.occupancy_embed_length,
)
x = torch.mul(inputs, scaling_factor.expand_as(inputs))
# x.shape == (self.num_agents, 15, 15, self.occupancy_embed_length)
x = (
x.permute(0, 3, 1, 2)
.contiguous()
.view(
1,
original_shape[0] * self.occupancy_embed_length,
original_shape[1],
original_shape[2],
)
)
# x.shape == (1, self.num_agents * self.occupancy_embed_length, 15, 15)
x = self.cnn(x)
# x.shape == (1, 256, 1, 1)
x = x.view(1, -1)
# x.shape == (1, self.lstm_in_dim)
batch_dim = 1
# x.shape = [batch_dim, self.lstm_in_dim]
if x.shape != (1, self.lstm_in_dim):
print("x.shape: {}".format(x.shape))
raise Exception("output of model is not as expected, check!")
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [batch_dim, 1, state_repr_length]
x = x.squeeze(1)
# x.shape = [batch_dim, state_repr_length]
final_state = x + self.after_lstm_mlp(x)
# state_talk_reply_repr.shape = [batch=1, state_repr_length]
marginal_actor_linear_output_list = [
self.marginal_actor_linear_list[agent_id](final_state).view(-1)
for agent_id in range(self.num_agents)
]
# list with each element of size [batch_dim, self.num_outputs]
# We unsqueeze(0) rather than unsqueeze(1) here because nn.LSTM expects its hidden
# state to be shaped (num_layers, batch, hidden_size) regardless of batch_first;
# the batch_first=True flag only changes the layout of the input/output tensors.
hidden = (final_state.unsqueeze(0), hidden[1])
joint_actor_logits = self.joint_actor_linear(final_state).view(
batch_dim, *((self.num_outputs,) * self.num_agents)
)
if self.num_agents <= 2:
marginal_logits = outer_product(
marginal_actor_linear_output_list
).unsqueeze(0)
else:
marginal_logits = outer_sum(marginal_actor_linear_output_list).unsqueeze(0)
assert marginal_logits.shape == joint_actor_logits.shape
combined_logits = joint_actor_logits + marginal_logits
# One length lists as this is a central agent
return {
"logit_all": combined_logits.view(
batch_dim, self.num_outputs ** self.num_agents
),
"value_all": self.critic_linear(final_state), # shape: [batch_dim,1]
"hidden_all": hidden,
"joint_logit_all": True,
}
class A3CLSTMBigCentralEgoGridsEmbedCNN(nn.Module):
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
occupancy_embed_length: int,
):
super(A3CLSTMBigCentralEgoGridsEmbedCNN, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.num_inputs_per_agent = num_inputs_per_agent
self.num_inputs = num_inputs_per_agent * num_agents
self.num_agents = num_agents
self.occupancy_embed_length = occupancy_embed_length
self.occupancy_embeddings = nn.Embedding(
num_inputs_per_agent, self.occupancy_embed_length
)
# input to conv is (self.occupancy_embed_length, 15, 15)
self.cnn = nn.Sequential(
OrderedDict(
[
("conv1", nn.Conv2d(self.occupancy_embed_length, 32, 3, padding=1)),
("relu1", nn.ReLU(inplace=True)),
# shape = (15, 15)
("conv2", nn.Conv2d(32, 64, 3, stride=2)),
("relu2", nn.ReLU(inplace=True)),
# shape = (7, 7)
("conv3", nn.Conv2d(64, 128, 3, stride=2)),
("relu3", nn.ReLU(inplace=True)),
# shape = (3, 3)
("conv4", nn.Conv2d(128, 256, 3, stride=2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (1, 1); Stride doesn't matter above
]
)
)
self.lstm_in_dim = 256
self.before_lstm_mlp = nn.Sequential(
nn.Linear(
self.num_agents * self.lstm_in_dim, self.num_agents * self.lstm_in_dim
),
nn.ReLU(inplace=True),
nn.Linear(self.num_agents * self.lstm_in_dim, self.lstm_in_dim),
nn.ReLU(inplace=True),
)
# LSTM
self.lstm = nn.LSTM(self.lstm_in_dim, state_repr_length, batch_first=True)
# Post LSTM fully connected layers
self.after_lstm_mlp = nn.Sequential(
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
)
# Marginal Linear actor
self.marginal_actor_linear_list = nn.ModuleList(
[
nn.Linear(state_repr_length, self.num_outputs)
for _ in range(self.num_agents)
]
)
# Conditional actor
self.joint_actor_linear = nn.Linear(
state_repr_length, self.num_outputs ** self.num_agents
)
# Linear critic
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
for agent_id in range(self.num_agents):
self.marginal_actor_linear_list[agent_id].weight.data = norm_col_init(
self.marginal_actor_linear_list[agent_id].weight.data, 0.01
)
self.marginal_actor_linear_list[agent_id].bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 15, 15):
raise Exception("input to model is not as expected, check!")
inputs = inputs.float()
# inputs.shape == (self.num_agents, self.num_inputs, 15, 15)
inputs = inputs.permute(0, 2, 3, 1).contiguous()
original_shape = inputs.shape
# original_shape == (self.num_agents, 15, 15, self.num_inputs)
scaling_factor = torch.reciprocal(
(inputs == 1).sum(dim=3).unsqueeze(3).float() + 1e-3
)
# scaling_factor.shape == (self.num_agents, 15, 15, 1)
inputs = inputs.view(-1, self.num_inputs_per_agent)
inputs = inputs.matmul(self.occupancy_embeddings.weight)
inputs = inputs.view(
original_shape[0],
original_shape[1],
original_shape[2],
self.occupancy_embed_length,
)
x = torch.mul(inputs, scaling_factor.expand_as(inputs))
# x.shape == (self.num_agents, 15, 15, self.occupancy_embed_length)
x = x.permute(0, 3, 1, 2).contiguous()
# x.shape == (self.num_agents, self.occupancy_embed_length, 15, 15)
x = self.cnn(x)
# x.shape == (self.num_agents, 256, 1, 1)
x = x.view(1, -1)
# x.shape == (1, self.num_agents * self.lstm_in_dim)
x = self.before_lstm_mlp(x)
batch_dim = 1
# x.shape = [batch_dim, self.lstm_in_dim]
if x.shape != (1, self.lstm_in_dim):
print("x.shape: {}".format(x.shape))
raise Exception("output of model is not as expected, check!")
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [batch_dim, 1, state_repr_length]
x = x.squeeze(1)
# x.shape = [batch_dim, state_repr_length]
final_state = x + self.after_lstm_mlp(x)
# state_talk_reply_repr.shape = [batch=1, state_repr_length]
marginal_actor_linear_output_list = [
self.marginal_actor_linear_list[agent_id](final_state).view(-1)
for agent_id in range(self.num_agents)
]
# list with each element of size [batch_dim, self.num_outputs]
# We unsqueeze(0) rather than unsqueeze(1) here because nn.LSTM expects its hidden
# state to be shaped (num_layers, batch, hidden_size) regardless of batch_first;
# the batch_first=True flag only changes the layout of the input/output tensors.
hidden = (final_state.unsqueeze(0), hidden[1])
joint_actor_logits = self.joint_actor_linear(final_state).view(
batch_dim, *((self.num_outputs,) * self.num_agents)
)
if self.num_agents <= 2:
marginal_logits = outer_product(
marginal_actor_linear_output_list
).unsqueeze(0)
else:
marginal_logits = outer_sum(marginal_actor_linear_output_list).unsqueeze(0)
assert marginal_logits.shape == joint_actor_logits.shape
combined_logits = joint_actor_logits + marginal_logits
        # Length-one lists as this is a central agent
return {
"logit_all": combined_logits.view(
batch_dim, self.num_outputs ** self.num_agents
),
"value_all": self.critic_linear(final_state), # shape: [batch_dim,1]
"hidden_all": hidden,
"joint_logit_all": True,
}
class A3CLSTMBigCentralBackboneAndCoordinatedActionsEgoGridsEmbedCNN(nn.Module):
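    """Central-backbone agent for egocentric occupancy grids.

    A single CNN + LSTM backbone encodes both agents' grid observations into
    one state representation; two separate actor heads then produce either
    plain marginal logits or, when `coordinate_actions` is True, coordinated
    logits along with randomization logits derived from the shared state.
    """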
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
occupancy_embed_length: int,
coordinate_actions: bool,
coordinate_actions_dim: Optional[int] = None,
central_critic: bool = False,
):
super(
A3CLSTMBigCentralBackboneAndCoordinatedActionsEgoGridsEmbedCNN, self
).__init__()
        # Either coordinate actions are used (a mixture of marginals) or,
        # when using plain single marginals, no coordinate_actions_dim
        # should be given.
assert coordinate_actions or not coordinate_actions_dim
# Since the representation for both agents is the same, having
# separate actor weights for mixture and also for single is
# necessary. Explicitly captured below.
separate_actor_weights = True
# Also since this is a single central agent, there is no central critic
# feature expected in this model
assert not central_critic
self.separate_actor_weights = separate_actor_weights
        self.num_outputs = sum(len(x) for x in action_groups)
        # self.num_outputs must be defined before it is (possibly) used as the
        # default value of coordinate_actions_dim below.
        self.coordinate_actions_dim = (
            self.num_outputs
            if coordinate_actions and coordinate_actions_dim is None
            else coordinate_actions_dim
        )
self.num_inputs_per_agent = num_inputs_per_agent
self.num_inputs = num_inputs_per_agent * num_agents
self.num_agents = num_agents
self.occupancy_embed_length = occupancy_embed_length
self.occupancy_embeddings = nn.Embedding(
num_inputs_per_agent, self.occupancy_embed_length
)
self.coordinate_actions = coordinate_actions
# input to conv is (self.occupancy_embed_length, 15, 15)
self.cnn = nn.Sequential(
OrderedDict(
[
("conv1", nn.Conv2d(self.occupancy_embed_length, 32, 3, padding=1)),
("relu1", nn.ReLU(inplace=True)),
# shape = (15, 15)
("conv2", nn.Conv2d(32, 64, 3, stride=2)),
("relu2", nn.ReLU(inplace=True)),
# shape = (7, 7)
("conv3", nn.Conv2d(64, 128, 3, stride=2)),
("relu3", nn.ReLU(inplace=True)),
# shape = (3, 3)
("conv4", nn.Conv2d(128, 256, 3, stride=2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (1, 1); Stride doesn't matter above
]
)
)
self.lstm_in_dim = 256
self.before_lstm_mlp = nn.Sequential(
nn.Linear(2 * self.lstm_in_dim, 2 * self.lstm_in_dim),
nn.ReLU(inplace=True),
nn.Linear(2 * self.lstm_in_dim, self.lstm_in_dim),
nn.ReLU(inplace=True),
)
# LSTM
self.lstm = nn.LSTM(self.lstm_in_dim, state_repr_length, batch_first=True)
# Post LSTM fully connected layers
self.after_lstm_mlp = nn.Sequential(
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
)
if coordinate_actions:
# Randomization MLP operating on the central state representation
self.to_randomization_logits = nn.Sequential(
nn.Linear(1 * state_repr_length, 64),
nn.ReLU(inplace=True),
nn.Linear(64, 64),
nn.ReLU(inplace=True),
nn.Linear(64, self.coordinate_actions_dim),
)
# Linear actor
self.actor_linear = None
actor_linear_input_dim = state_repr_length
# self.pre_actor_list = nn.ModuleList(
# [
# nn.Sequential(
# nn.Linear(state_repr_length, actor_linear_input_dim),
# nn.ReLU(inplace=True),
# )
#
# for _ in range(2)
# ]
# )
if coordinate_actions:
self.actor_linear_list = nn.ModuleList(
[
nn.Linear(
actor_linear_input_dim,
self.num_outputs * self.coordinate_actions_dim,
)
for _ in range(2)
]
)
else:
self.actor_linear_list = nn.ModuleList(
[nn.Linear(actor_linear_input_dim, self.num_outputs) for _ in range(2)]
)
for al in self.actor_linear_list:
al.weight.data = norm_col_init(al.weight.data, 0.01)
al.bias.data.fill_(0)
# Linear critic
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 15, 15):
raise Exception("input to model is not as expected, check!")
inputs = inputs.float()
        # inputs.shape == (2, self.num_inputs_per_agent, 15, 15)
        inputs = inputs.permute(0, 2, 3, 1).contiguous()
        original_shape = inputs.shape
        # original_shape == (2, 15, 15, self.num_inputs_per_agent)
scaling_factor = torch.reciprocal(
(inputs == 1).sum(dim=3).unsqueeze(3).float() + 1e-3
)
# scaling_factor.shape == (2, 15, 15, 1)
inputs = inputs.view(-1, self.num_inputs_per_agent)
inputs = inputs.matmul(self.occupancy_embeddings.weight)
inputs = inputs.view(
original_shape[0],
original_shape[1],
original_shape[2],
self.occupancy_embed_length,
)
x = torch.mul(inputs, scaling_factor.expand_as(inputs))
# x.shape == (2, 15, 15, self.occupancy_embed_length)
x = x.permute(0, 3, 1, 2).contiguous()
# x.shape == (2, self.occupancy_embed_length, 15, 15)
x = self.cnn(x)
# x.shape == (2, 256, 1, 1)
x = x.view(1, -1)
# x.shape == (1, 2 * self.lstm_in_dim = 512)
x = self.before_lstm_mlp(x)
batch_dim = 1
# x.shape = [batch_dim, self.lstm_in_dim]
if x.shape != (1, self.lstm_in_dim):
print("x.shape: {}".format(x.shape))
raise Exception("output of model is not as expected, check!")
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [batch_dim, 1, state_repr_length]
x = x.squeeze(1)
# x.shape = [batch_dim, state_repr_length]
final_state = x + self.after_lstm_mlp(x)
        # final_state.shape = [batch=1, state_repr_length]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (final_state.unsqueeze(0), hidden[1])
# One central backbone agent, so no option for central critic, check asserts
value_all = self.critic_linear(final_state)
# Same value for both agents
to_return = {
"value_all": torch.cat([value_all, value_all], dim=0),
"hidden_all": hidden,
}
logits = torch.cat(
[
linear(
# self.pre_actor_list[i](final_state)
final_state
)
for i, linear in enumerate(self.actor_linear_list)
],
dim=0,
)
if self.coordinate_actions:
# Mixture marginals, actor output = num_outputs * coordinate_action_dim
to_return["randomization_logits"] = self.to_randomization_logits(
final_state
)
to_return["coordinated_logits"] = logits
else:
# Single marginal, actor output = num_outputs
to_return["logit_all"] = logits
return to_return
class A3CLSTMCentralEgoVision(nn.Module):
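    """Central A3C agent over channel-stacked egocentric RGB observations.

    Both agents' 84x84 frames are concatenated along the channel dimension,
    encoded by one CNN + LSTM, and decoded by marginal per-agent actor heads
    combined (via an outer product) with a joint actor head over the joint
    action space.
    """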
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
):
super(A3CLSTMCentralEgoVision, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.num_inputs_per_agent = num_inputs_per_agent
self.num_inputs = num_inputs_per_agent * num_agents
self.num_agents = num_agents
# input to conv is (num_agents * self.num_inputs_per_agent, 84, 84)
self.cnn = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
self.num_inputs_per_agent * self.num_agents,
16,
5,
stride=1,
padding=2,
),
),
("maxpool1", nn.MaxPool2d(2, 2)),
("relu1", nn.ReLU(inplace=True)),
                    # shape = (42, 42)
("conv2", nn.Conv2d(16, 16, 5, stride=1, padding=1)),
("maxpool2", nn.MaxPool2d(2, 2)),
("relu2", nn.ReLU(inplace=True)),
                    # shape = (20, 20)
("conv3", nn.Conv2d(16, 32, 4, stride=1, padding=1)),
("maxpool3", nn.MaxPool2d(2, 2)),
("relu3", nn.ReLU(inplace=True)),
                    # shape = (9, 9)
("conv4", nn.Conv2d(32, 64, 3, stride=1, padding=1)),
("maxpool4", nn.MaxPool2d(2, 2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (4, 4)
("conv5", nn.Conv2d(64, 128, 3, stride=1, padding=1)),
("maxpool5", nn.MaxPool2d(2, 2)),
("relu5", nn.ReLU(inplace=True)),
# shape = (2, 2)
]
)
)
# LSTM
self.lstm_in_dim = 512
self.lstm = nn.LSTM(self.lstm_in_dim, state_repr_length, batch_first=True)
# Post LSTM fully connected layers
self.after_lstm_mlp = nn.Sequential(
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
)
# Marginal Linear actor
self.marginal_actor_linear_list = nn.ModuleList(
[
nn.Linear(state_repr_length, self.num_outputs)
for _ in range(self.num_agents)
]
)
# Conditional actor
self.joint_actor_linear = nn.Linear(
state_repr_length, self.num_outputs * self.num_outputs
)
# Linear critic
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
for agent_id in range(self.num_agents):
self.marginal_actor_linear_list[agent_id].weight.data = norm_col_init(
self.marginal_actor_linear_list[agent_id].weight.data, 0.01
)
self.marginal_actor_linear_list[agent_id].bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 84, 84):
raise Exception("input to model is not as expected, check!")
inputs = inputs.view(
self.num_agents * self.num_inputs_per_agent, 84, 84
).unsqueeze(0)
        # inputs.shape == (1, self.num_agents * self.num_inputs_per_agent, 84, 84)
x = self.cnn(inputs)
        # x.shape == (1, 128, 2, 2)
x = x.view(1, -1)
# x.shape == (batch_dim, self.lstm_in_dim)
batch_dim = 1
# x.shape = [batch_dim, self.lstm_in_dim]
if x.shape != (1, self.lstm_in_dim):
print("x.shape: {}".format(x.shape))
raise Exception("output of model is not as expected, check!")
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [batch_dim, 1, state_repr_length]
x = x.squeeze(1)
# x.shape = [batch_dim, state_repr_length]
final_state = x + self.after_lstm_mlp(x)
        # final_state.shape = [batch=1, state_repr_length]
marginal_actor_linear_output_list = [
self.marginal_actor_linear_list[agent_id](final_state)
for agent_id in range(self.num_agents)
]
# list with each element of size [batch_dim, self.num_outputs]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (final_state.unsqueeze(0), hidden[1])
joint_actor_logits = self.joint_actor_linear(final_state).view(
batch_dim, self.num_outputs, self.num_outputs
)
outer_product_marginal_logits = marginal_actor_linear_output_list[0].unsqueeze(
2
) * marginal_actor_linear_output_list[1].unsqueeze(1)
assert outer_product_marginal_logits.shape == (
batch_dim,
self.num_outputs,
self.num_outputs,
)
combined_logits = joint_actor_logits + outer_product_marginal_logits
        # Length-one lists as this is a central agent
return {
"logit_all": combined_logits.view(
batch_dim, self.num_outputs * self.num_outputs
),
"value_all": self.critic_linear(final_state), # shape: [batch_dim,1]
"hidden_all": hidden,
"joint_logit_all": True,
}
class A3CLSTMBigCentralEgoVision(nn.Module):
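    """Central A3C agent with a shared per-agent CNN over egocentric RGB.

    Each agent's 84x84 frame is encoded separately by the same CNN; the
    flattened features are fused by `before_lstm_mlp` and an LSTM before the
    marginal and joint actor heads produce logits over the joint action space.
    """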
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
):
super(A3CLSTMBigCentralEgoVision, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.num_inputs_per_agent = num_inputs_per_agent
self.num_inputs = num_inputs_per_agent * num_agents
self.num_agents = num_agents
# input to conv is (self.num_inputs_per_agent, 84, 84)
self.cnn = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
self.num_inputs_per_agent, 16, 5, stride=1, padding=2
),
),
("maxpool1", nn.MaxPool2d(2, 2)),
("relu1", nn.ReLU(inplace=True)),
                    # shape = (42, 42)
("conv2", nn.Conv2d(16, 16, 5, stride=1, padding=1)),
("maxpool2", nn.MaxPool2d(2, 2)),
("relu2", nn.ReLU(inplace=True)),
                    # shape = (20, 20)
("conv3", nn.Conv2d(16, 32, 4, stride=1, padding=1)),
("maxpool3", nn.MaxPool2d(2, 2)),
("relu3", nn.ReLU(inplace=True)),
                    # shape = (9, 9)
("conv4", nn.Conv2d(32, 64, 3, stride=1, padding=1)),
("maxpool4", nn.MaxPool2d(2, 2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (4, 4)
("conv5", nn.Conv2d(64, 128, 3, stride=1, padding=1)),
("maxpool5", nn.MaxPool2d(2, 2)),
("relu5", nn.ReLU(inplace=True)),
# shape = (2, 2)
]
)
)
# LSTM
self.lstm_in_dim = 512
self.before_lstm_mlp = nn.Sequential(
nn.Linear(self.lstm_in_dim * self.num_agents, self.lstm_in_dim),
nn.ReLU(inplace=True),
)
self.lstm = nn.LSTM(self.lstm_in_dim, state_repr_length, batch_first=True)
# Post LSTM fully connected layers
self.after_lstm_mlp = nn.Sequential(
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
)
# Marginal Linear actor
self.marginal_actor_linear_list = nn.ModuleList(
[
nn.Linear(state_repr_length, self.num_outputs)
for _ in range(self.num_agents)
]
)
# Conditional actor
self.joint_actor_linear = nn.Linear(
state_repr_length, self.num_outputs ** self.num_agents
)
# Linear critic
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
for agent_id in range(self.num_agents):
self.marginal_actor_linear_list[agent_id].weight.data = norm_col_init(
self.marginal_actor_linear_list[agent_id].weight.data, 0.01
)
self.marginal_actor_linear_list[agent_id].bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 84, 84):
raise Exception("input to model is not as expected, check!")
        # inputs.shape == (self.num_agents, self.num_inputs_per_agent, 84, 84)
        x = self.cnn(inputs)
        # x.shape == (self.num_agents, 128, 2, 2)
x = x.view(1, -1)
# x.shape == (batch_dim, self.lstm_in_dim * self.num_agents)
x = self.before_lstm_mlp(x)
# x.shape == (batch_dim, self.lstm_in_dim)
batch_dim = 1
if x.shape != (1, self.lstm_in_dim):
print("x.shape: {}".format(x.shape))
raise Exception("output of model is not as expected, check!")
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [batch_dim, 1, state_repr_length]
x = x.squeeze(1)
# x.shape = [batch_dim, state_repr_length]
final_state = x + self.after_lstm_mlp(x)
        # final_state.shape = [batch=1, state_repr_length]
marginal_actor_linear_output_list = [
self.marginal_actor_linear_list[agent_id](final_state).view(-1)
for agent_id in range(self.num_agents)
]
# list with each element of size [batch_dim, self.num_outputs]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (final_state.unsqueeze(0), hidden[1])
joint_actor_logits = self.joint_actor_linear(final_state).view(
batch_dim, *((self.num_outputs,) * self.num_agents)
)
if self.num_agents <= 2:
marginal_logits = outer_product(
marginal_actor_linear_output_list
).unsqueeze(0)
else:
marginal_logits = outer_sum(marginal_actor_linear_output_list).unsqueeze(0)
assert marginal_logits.shape == joint_actor_logits.shape
assert marginal_logits.shape == (
batch_dim,
*((self.num_outputs,) * self.num_agents),
)
combined_logits = joint_actor_logits + marginal_logits
        # Length-one lists as this is a central agent
return {
"logit_all": combined_logits.view(
batch_dim, self.num_outputs ** self.num_agents
),
"value_all": self.critic_linear(final_state), # shape: [batch_dim,1]
"hidden_all": hidden,
"joint_logit_all": True,
}
class OldTaskA3CLSTMBigCentralEgoVision(nn.Module):
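    """Variant of the big central ego-vision model used for an older task.

    Uses a smaller four-block CNN (19 final channels, 4x4 feature map) and a
    two-agent joint actor head of size num_outputs * num_outputs.
    """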
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
):
super(OldTaskA3CLSTMBigCentralEgoVision, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.num_inputs_per_agent = num_inputs_per_agent
self.num_inputs = num_inputs_per_agent * num_agents
self.num_agents = num_agents
final_cnn_channels = 19
# input to conv is (self.num_inputs_per_agent, 84, 84)
self.cnn = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
self.num_inputs_per_agent, 32, 5, stride=1, padding=2
),
),
("maxpool1", nn.MaxPool2d(2, 2)),
("relu1", nn.ReLU(inplace=True)),
                    # shape = (42, 42)
("conv2", nn.Conv2d(32, 32, 5, stride=1, padding=1)),
("maxpool2", nn.MaxPool2d(2, 2)),
("relu2", nn.ReLU(inplace=True)),
                    # shape = (20, 20)
("conv3", nn.Conv2d(32, 64, 4, stride=1, padding=1)),
("maxpool3", nn.MaxPool2d(2, 2)),
("relu3", nn.ReLU(inplace=True)),
                    # shape = (9, 9)
(
"conv4",
nn.Conv2d(64, final_cnn_channels, 3, stride=1, padding=1),
),
("maxpool4", nn.MaxPool2d(2, 2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (4, 4)
]
)
)
# CNN output:
self.cnn_output_dim = final_cnn_channels * 4 * 4
# LSTM
self.lstm_in_dim = 64 * 4 * 4
self.before_lstm_mlp = nn.Sequential(
nn.Linear(self.cnn_output_dim * 2, self.lstm_in_dim), nn.ReLU(inplace=True)
)
self.lstm = nn.LSTM(self.lstm_in_dim, state_repr_length, batch_first=True)
# Post LSTM fully connected layers
self.after_lstm_mlp = nn.Sequential(
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
)
# Marginal Linear actor
self.marginal_actor_linear_list = nn.ModuleList(
[
nn.Linear(state_repr_length, self.num_outputs)
for _ in range(self.num_agents)
]
)
# Conditional actor
self.joint_actor_linear = nn.Linear(
state_repr_length, self.num_outputs * self.num_outputs
)
# Linear critic
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
for agent_id in range(self.num_agents):
self.marginal_actor_linear_list[agent_id].weight.data = norm_col_init(
self.marginal_actor_linear_list[agent_id].weight.data, 0.01
)
self.marginal_actor_linear_list[agent_id].bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 84, 84):
raise Exception("input to model is not as expected, check!")
        # inputs.shape == (self.num_agents, self.num_inputs_per_agent, 84, 84)
        x = self.cnn(inputs)
        # x.shape == (self.num_agents, final_cnn_channels, 4, 4)
        x = x.view(1, -1)
        # x.shape == (batch_dim, self.cnn_output_dim * 2)
x = self.before_lstm_mlp(x)
# x.shape == (batch_dim, self.lstm_in_dim)
batch_dim = 1
if x.shape != (1, self.lstm_in_dim):
print("x.shape: {}".format(x.shape))
raise Exception("output of model is not as expected, check!")
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [batch_dim, 1, state_repr_length]
x = x.squeeze(1)
# x.shape = [batch_dim, state_repr_length]
final_state = x + self.after_lstm_mlp(x)
        # final_state.shape = [batch=1, state_repr_length]
marginal_actor_linear_output_list = [
self.marginal_actor_linear_list[agent_id](final_state)
for agent_id in range(self.num_agents)
]
# list with each element of size [batch_dim, self.num_outputs]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (final_state.unsqueeze(0), hidden[1])
joint_actor_logits = self.joint_actor_linear(final_state).view(
batch_dim, self.num_outputs, self.num_outputs
)
outer_product_marginal_logits = marginal_actor_linear_output_list[0].unsqueeze(
2
) * marginal_actor_linear_output_list[1].unsqueeze(1)
assert outer_product_marginal_logits.shape == (
batch_dim,
self.num_outputs,
self.num_outputs,
)
combined_logits = joint_actor_logits + outer_product_marginal_logits
        # Length-one lists as this is a central agent
return {
"logit_all": combined_logits.view(
batch_dim, self.num_outputs * self.num_outputs
),
"value_all": self.critic_linear(final_state), # shape: [batch_dim,1]
"hidden_all": hidden,
"joint_logit_all": True,
}
class A3CLSTMNStepComCoordinatedActionsEgoVision(nn.Module):
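    """Decentralized A3C agents with talk/reply communication over ego vision.

    Each agent encodes its own 84x84 frame, exchanges soft "talk" and "reply"
    symbols with the other agents, refines its state representation after each
    exchange, and outputs either marginal or coordinated action logits (plus
    randomization logits) depending on `coordinate_actions`.
    """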
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
talk_embed_length: int,
agent_num_embed_length: int,
reply_embed_length: int,
num_talk_symbols: int,
num_reply_symbols: int,
turn_off_communication: bool,
coordinate_actions: bool,
coordinate_actions_dim: Optional[int] = None,
central_critic: bool = False,
separate_actor_weights: bool = False,
final_cnn_channels: int = 128,
):
super(A3CLSTMNStepComCoordinatedActionsEgoVision, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.turn_off_communication = turn_off_communication
self.central_critic = central_critic
self.num_inputs_per_agent = num_inputs_per_agent
self.num_agents = num_agents
self.num_talk_symbols = num_talk_symbols
self.num_reply_symbols = num_reply_symbols
self.separate_actor_weights = separate_actor_weights
self.coordinate_actions_dim = (
self.num_outputs
if coordinate_actions_dim is None
else coordinate_actions_dim
)
self.coordinate_actions = coordinate_actions
# input to conv is (num_agents, self.num_inputs_per_agent, 84, 84)
self.cnn = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
self.num_inputs_per_agent, 16, 5, stride=1, padding=2
),
),
("maxpool1", nn.MaxPool2d(2, 2)),
("relu1", nn.ReLU(inplace=True)),
                    # shape = (42, 42)
("conv2", nn.Conv2d(16, 16, 5, stride=1, padding=1)),
("maxpool2", nn.MaxPool2d(2, 2)),
("relu2", nn.ReLU(inplace=True)),
                    # shape = (20, 20)
("conv3", nn.Conv2d(16, 32, 4, stride=1, padding=1)),
("maxpool3", nn.MaxPool2d(2, 2)),
("relu3", nn.ReLU(inplace=True)),
                    # shape = (9, 9)
("conv4", nn.Conv2d(32, 64, 3, stride=1, padding=1)),
("maxpool4", nn.MaxPool2d(2, 2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (4, 4)
(
"conv5",
nn.Conv2d(64, final_cnn_channels, 3, stride=1, padding=1),
),
("maxpool5", nn.MaxPool2d(2, 2)),
("relu5", nn.ReLU(inplace=True)),
# shape = (2, 2)
]
)
)
# Vocab embed
self.talk_embeddings = nn.Embedding(num_talk_symbols, talk_embed_length)
self.reply_embeddings = nn.Embedding(num_reply_symbols, reply_embed_length)
self.talk_symbol_classifier = nn.Linear(state_repr_length, num_talk_symbols)
self.reply_symbol_classifier = nn.Linear(state_repr_length, num_reply_symbols)
# Agent embed
self.agent_num_embeddings = nn.Parameter(
torch.rand(self.num_agents, agent_num_embed_length)
)
# LSTM
self.lstm = nn.LSTM(
final_cnn_channels * 4 + agent_num_embed_length,
state_repr_length,
batch_first=True,
)
# Belief update MLP
state_and_talk_rep_len = state_repr_length + talk_embed_length * (
num_agents - 1
)
self.after_talk_mlp = nn.Sequential(
nn.Linear(state_and_talk_rep_len, state_and_talk_rep_len),
nn.ReLU(inplace=True),
nn.Linear(state_and_talk_rep_len, state_repr_length),
nn.ReLU(inplace=True),
)
state_and_reply_rep_len = state_repr_length + reply_embed_length * (
num_agents - 1
)
self.after_reply_mlp = nn.Sequential(
nn.Linear(state_and_reply_rep_len, state_and_reply_rep_len),
nn.ReLU(inplace=True),
nn.Linear(state_and_reply_rep_len, state_repr_length),
nn.ReLU(inplace=True),
)
if coordinate_actions:
# Randomization MLP
self.to_randomization_logits = nn.Sequential(
nn.Linear(self.num_agents * reply_embed_length, 64),
nn.ReLU(inplace=True),
nn.Linear(64, 64),
nn.ReLU(inplace=True),
nn.Linear(64, self.coordinate_actions_dim),
)
# self.marginal_linear_actor = nn.Linear(state_repr_length, self.num_outputs)
# self.marginal_linear_actor.weight.data = norm_col_init(
# self.marginal_linear_actor.weight.data, 0.01
# )
# self.marginal_linear_actor.bias.data.fill_(0)
# Linear actor
self.actor_linear = None
if coordinate_actions:
if separate_actor_weights:
self.actor_linear_list = nn.ModuleList(
[
nn.Linear(
state_repr_length,
self.num_outputs * self.coordinate_actions_dim,
)
for _ in range(2)
]
)
else:
self.actor_linear = nn.Linear(
state_repr_length, self.num_outputs * self.coordinate_actions_dim
)
else:
assert not separate_actor_weights
self.actor_linear = nn.Linear(state_repr_length, self.num_outputs)
if self.actor_linear is not None:
self.actor_linear.weight.data = norm_col_init(
self.actor_linear.weight.data, 0.01
)
self.actor_linear.bias.data.fill_(0)
else:
for al in self.actor_linear_list:
al.weight.data = norm_col_init(al.weight.data, 0.01)
al.bias.data.fill_(0)
# Linear critic
if self.central_critic:
self.critic_linear = nn.Linear(state_repr_length * self.num_agents, 1)
else:
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
self.cnn._modules["conv5"].weight.data.mul_(relu_gain)
self.talk_symbol_classifier.weight.data = norm_col_init(
self.talk_symbol_classifier.weight.data, 0.01
)
self.talk_symbol_classifier.bias.data.fill_(0)
self.reply_symbol_classifier.weight.data = norm_col_init(
self.reply_symbol_classifier.weight.data, 0.01
)
self.reply_symbol_classifier.bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
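        """One forward pass of the communication model.

        Encodes each agent's frame, runs a talk round and a reply round (soft
        symbol exchange via probability-weighted embedding averages), and
        returns per-agent values, communication probabilities, actor logits,
        and the updated hidden state.
        """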
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 84, 84):
raise Exception("input to model is not as expected, check!")
x = self.cnn(inputs)
# x.shape == (2, 128, 2, 2)
x = x.view(x.size(0), -1)
# x.shape = [num_agents, 512]
x = torch.cat((x, self.agent_num_embeddings), dim=1)
# x.shape = [num_agents, 512 + agent_num_embed_length]
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [num_agents, 1, state_repr_length]
# hidden[0].shape == [1, num_agents, state_repr_length]
# hidden[1].shape == [1, num_agents, state_repr_length]
x = x.squeeze(1)
# x.shape = [num_agents, state_repr_length]
talk_logits = self.talk_symbol_classifier(x)
talk_probs = F.softmax(talk_logits, dim=1)
# talk_probs.shape = [num_agents, num_talk_symbols]
talk_outputs = torch.mm(talk_probs, self.talk_embeddings.weight)
# talk_outputs.shape = [num_agents, talk_embed_length]
if not self.turn_off_communication:
talk_heard_per_agent = _unfold_communications(talk_outputs)
else:
talk_heard_per_agent = torch.cat(
[talk_outputs] * (self.num_agents - 1), dim=1
)
state_talk_repr = x + self.after_talk_mlp(
torch.cat((x, talk_heard_per_agent), dim=1)
)
        # state_talk_repr.shape = [num_agents, state_repr_length]
reply_logits = self.reply_symbol_classifier(state_talk_repr)
reply_probs = F.softmax(reply_logits, dim=1)
# reply_probs.shape = [num_agents, num_reply_symbols]
reply_outputs = torch.mm(reply_probs, self.reply_embeddings.weight)
# reply_outputs.shape = [num_agents, reply_embed_length]
if not self.turn_off_communication:
reply_heard_per_agent = _unfold_communications(reply_outputs)
else:
reply_heard_per_agent = torch.cat(
[reply_outputs] * (self.num_agents - 1), dim=1
)
state_talk_reply_repr = state_talk_repr + self.after_reply_mlp(
torch.cat((state_talk_repr, reply_heard_per_agent), dim=1)
)
# state_talk_reply_repr.shape = [num_agents, state_repr_length]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (state_talk_reply_repr.unsqueeze(0), hidden[1])
if self.central_critic:
value_all = self.critic_linear(
torch.cat(
[
state_talk_reply_repr,
_unfold_communications(state_talk_reply_repr),
],
dim=1,
)
)
else:
value_all = self.critic_linear(state_talk_reply_repr)
to_return = {
"value_all": value_all,
"hidden_all": hidden,
"talk_probs": talk_probs,
"reply_probs": reply_probs,
}
if self.coordinate_actions:
if self.num_agents != 2:
to_return["randomization_logits"] = self.to_randomization_logits(
reply_outputs.view(1, -1)
)
else:
to_return["randomization_logits"] = self.to_randomization_logits(
reply_heard_per_agent.view(1, -1)
)
if not self.separate_actor_weights:
logits = self.actor_linear(state_talk_reply_repr)
else:
logits = torch.cat(
[
linear(state_talk_reply_repr[i].unsqueeze(0))
for i, linear in enumerate(self.actor_linear_list)
],
dim=0,
)
# logits = self.actor_linear(
# state_talk_reply_repr
# ) + self.marginal_linear_actor(state_talk_reply_repr).unsqueeze(1).repeat(
# 1, self.coordinate_actions_dim, 1
# ).view(
# self.num_agents, self.num_outputs ** 2
# )
to_return["coordinated_logits"] = logits
else:
to_return["logit_all"] = self.actor_linear(state_talk_reply_repr)
return to_return
class BigA3CLSTMNStepComCoordinatedActionsEgoVision(nn.Module):
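    """Wider-CNN variant of the talk/reply communication model.

    Structurally matches A3CLSTMNStepComCoordinatedActionsEgoVision but with
    wider early convolutions, a four-block CNN ending in a 4x4 feature map,
    and 64 final channels by default.
    """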
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
talk_embed_length: int,
agent_num_embed_length: int,
reply_embed_length: int,
num_talk_symbols: int,
num_reply_symbols: int,
turn_off_communication: bool,
coordinate_actions: bool,
coordinate_actions_dim: Optional[int] = None,
central_critic: bool = False,
separate_actor_weights: bool = False,
final_cnn_channels: int = 64,
):
super(BigA3CLSTMNStepComCoordinatedActionsEgoVision, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.turn_off_communication = turn_off_communication
self.central_critic = central_critic
self.num_inputs_per_agent = num_inputs_per_agent
self.num_agents = num_agents
self.num_talk_symbols = num_talk_symbols
self.num_reply_symbols = num_reply_symbols
self.separate_actor_weights = separate_actor_weights
self.coordinate_actions_dim = (
self.num_outputs
if coordinate_actions_dim is None
else coordinate_actions_dim
)
self.coordinate_actions = coordinate_actions
# input to conv is (num_agents, self.num_inputs_per_agent, 84, 84)
self.cnn = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
self.num_inputs_per_agent, 32, 5, stride=1, padding=2
),
),
("maxpool1", nn.MaxPool2d(2, 2)),
("relu1", nn.ReLU(inplace=True)),
                    # shape = (42, 42)
("conv2", nn.Conv2d(32, 32, 5, stride=1, padding=1)),
("maxpool2", nn.MaxPool2d(2, 2)),
("relu2", nn.ReLU(inplace=True)),
                    # shape = (20, 20)
("conv3", nn.Conv2d(32, 64, 4, stride=1, padding=1)),
("maxpool3", nn.MaxPool2d(2, 2)),
("relu3", nn.ReLU(inplace=True)),
                    # shape = (9, 9)
(
"conv4",
nn.Conv2d(64, final_cnn_channels, 3, stride=1, padding=1),
),
("maxpool4", nn.MaxPool2d(2, 2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (4, 4)
]
)
)
# Vocab embed
self.talk_embeddings = nn.Embedding(num_talk_symbols, talk_embed_length)
self.reply_embeddings = nn.Embedding(num_reply_symbols, reply_embed_length)
self.talk_symbol_classifier = nn.Linear(state_repr_length, num_talk_symbols)
self.reply_symbol_classifier = nn.Linear(state_repr_length, num_reply_symbols)
# Agent embed
self.agent_num_embeddings = nn.Parameter(
torch.rand(self.num_agents, agent_num_embed_length)
)
# LSTM
self.lstm = nn.LSTM(
final_cnn_channels * 4 * 4 + agent_num_embed_length,
state_repr_length,
batch_first=True,
)
# Belief update MLP
state_and_talk_rep_len = state_repr_length + talk_embed_length * (
num_agents - 1
)
self.after_talk_mlp = nn.Sequential(
nn.Linear(state_and_talk_rep_len, state_and_talk_rep_len),
nn.ReLU(inplace=True),
nn.Linear(state_and_talk_rep_len, state_repr_length),
nn.ReLU(inplace=True),
)
state_and_reply_rep_len = state_repr_length + reply_embed_length * (
num_agents - 1
)
self.after_reply_mlp = nn.Sequential(
nn.Linear(state_and_reply_rep_len, state_and_reply_rep_len),
nn.ReLU(inplace=True),
nn.Linear(state_and_reply_rep_len, state_repr_length),
nn.ReLU(inplace=True),
)
if coordinate_actions:
# Randomization MLP
self.to_randomization_logits = nn.Sequential(
nn.Linear(self.num_agents * reply_embed_length, 64),
nn.ReLU(inplace=True),
nn.Linear(64, 64),
nn.ReLU(inplace=True),
nn.Linear(64, self.coordinate_actions_dim),
)
# self.marginal_linear_actor = nn.Linear(state_repr_length, self.num_outputs)
# self.marginal_linear_actor.weight.data = norm_col_init(
# self.marginal_linear_actor.weight.data, 0.01
# )
# self.marginal_linear_actor.bias.data.fill_(0)
# Linear actor
self.actor_linear = None
if coordinate_actions:
if separate_actor_weights:
self.actor_linear_list = nn.ModuleList(
[
nn.Linear(
state_repr_length,
self.num_outputs * self.coordinate_actions_dim,
)
for _ in range(2)
]
)
else:
self.actor_linear = nn.Linear(
state_repr_length, self.num_outputs * self.coordinate_actions_dim
)
else:
assert not separate_actor_weights
self.actor_linear = nn.Linear(state_repr_length, self.num_outputs)
if self.actor_linear is not None:
self.actor_linear.weight.data = norm_col_init(
self.actor_linear.weight.data, 0.01
)
self.actor_linear.bias.data.fill_(0)
else:
for al in self.actor_linear_list:
al.weight.data = norm_col_init(al.weight.data, 0.01)
al.bias.data.fill_(0)
# Linear critic
if self.central_critic:
self.critic_linear = nn.Linear(state_repr_length * self.num_agents, 1)
else:
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
self.talk_symbol_classifier.weight.data = norm_col_init(
self.talk_symbol_classifier.weight.data, 0.01
)
self.talk_symbol_classifier.bias.data.fill_(0)
self.reply_symbol_classifier.weight.data = norm_col_init(
self.reply_symbol_classifier.weight.data, 0.01
)
self.reply_symbol_classifier.bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 84, 84):
raise Exception("input to model is not as expected, check!")
x = self.cnn(inputs)
        # x.shape == (num_agents, final_cnn_channels, 4, 4)
        x = x.view(x.size(0), -1)
        # x.shape = [num_agents, final_cnn_channels * 4 * 4]
        x = torch.cat((x, self.agent_num_embeddings), dim=1)
        # x.shape = [num_agents, final_cnn_channels * 4 * 4 + agent_num_embed_length]
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [num_agents, 1, state_repr_length]
# hidden[0].shape == [1, num_agents, state_repr_length]
# hidden[1].shape == [1, num_agents, state_repr_length]
x = x.squeeze(1)
# x.shape = [num_agents, state_repr_length]
talk_logits = self.talk_symbol_classifier(x)
talk_probs = F.softmax(talk_logits, dim=1)
# talk_probs.shape = [num_agents, num_talk_symbols]
talk_outputs = torch.mm(talk_probs, self.talk_embeddings.weight)
# talk_outputs.shape = [num_agents, talk_embed_length]
if not self.turn_off_communication:
talk_heard_per_agent = _unfold_communications(talk_outputs)
else:
talk_heard_per_agent = torch.cat(
[talk_outputs] * (self.num_agents - 1), dim=1
)
state_talk_repr = x + self.after_talk_mlp(
torch.cat((x, talk_heard_per_agent), dim=1)
)
        # state_talk_repr.shape = [num_agents, state_repr_length]
reply_logits = self.reply_symbol_classifier(state_talk_repr)
reply_probs = F.softmax(reply_logits, dim=1)
# reply_probs.shape = [num_agents, num_reply_symbols]
reply_outputs = torch.mm(reply_probs, self.reply_embeddings.weight)
# reply_outputs.shape = [num_agents, reply_embed_length]
if not self.turn_off_communication:
reply_heard_per_agent = _unfold_communications(reply_outputs)
else:
reply_heard_per_agent = torch.cat(
[reply_outputs] * (self.num_agents - 1), dim=1
)
state_talk_reply_repr = state_talk_repr + self.after_reply_mlp(
torch.cat((state_talk_repr, reply_heard_per_agent), dim=1)
)
# state_talk_reply_repr.shape = [num_agents, state_repr_length]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (state_talk_reply_repr.unsqueeze(0), hidden[1])
if self.central_critic:
value_all = self.critic_linear(
torch.cat(
[
state_talk_reply_repr,
_unfold_communications(state_talk_reply_repr),
],
dim=1,
)
)
else:
value_all = self.critic_linear(state_talk_reply_repr)
to_return = {
"value_all": value_all,
"hidden_all": hidden,
"talk_probs": talk_probs,
"reply_probs": reply_probs,
}
if self.coordinate_actions:
to_return["randomization_logits"] = self.to_randomization_logits(
reply_heard_per_agent.view(1, -1)
)
if not self.separate_actor_weights:
logits = self.actor_linear(state_talk_reply_repr)
else:
logits = torch.cat(
[
linear(state_talk_reply_repr[i].unsqueeze(0))
for i, linear in enumerate(self.actor_linear_list)
],
dim=0,
)
# logits = self.actor_linear(
# state_talk_reply_repr
# ) + self.marginal_linear_actor(state_talk_reply_repr).unsqueeze(1).repeat(
# 1, self.coordinate_actions_dim, 1
# ).view(
# self.num_agents, self.num_outputs ** 2
# )
to_return["coordinated_logits"] = logits
else:
to_return["logit_all"] = self.actor_linear(state_talk_reply_repr)
return to_return
class A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(nn.Module):
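    """Talk/reply communication model operating on egocentric occupancy grids.

    Grid cells are embedded with `occupancy_embeddings`, reduced by a compact
    CNN to a single spatial cell, and then processed like the ego-vision
    communication models: LSTM, talk round, reply round, and actor/critic
    heads (optionally with coordinated actions and randomization logits).
    """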
def __init__(
self,
num_inputs: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
talk_embed_length: int,
agent_num_embed_length: int,
reply_embed_length: int,
num_talk_symbols: int,
num_reply_symbols: int,
occupancy_embed_length: int,
turn_off_communication: bool,
coordinate_actions: bool,
coordinate_actions_dim: Optional[int] = None,
central_critic: bool = False,
separate_actor_weights: bool = False,
final_cnn_channels: int = 256,
):
super(A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
# assert not turn_off_communication
self.turn_off_communication = turn_off_communication
self.central_critic = central_critic
self.num_inputs = num_inputs
self.num_agents = num_agents
self.num_talk_symbols = num_talk_symbols
self.num_reply_symbols = num_reply_symbols
self.separate_actor_weights = separate_actor_weights
self.coordinate_actions_dim = (
self.num_outputs
if coordinate_actions_dim is None
else coordinate_actions_dim
)
self.occupancy_embed_length = occupancy_embed_length
self.occupancy_embeddings = nn.Embedding(
num_inputs, self.occupancy_embed_length
)
self.coordinate_actions = coordinate_actions
# input to conv is (num_agents, self.occupancy_embed_length, 15, 15)
self.cnn = nn.Sequential(
OrderedDict(
[
("conv1", nn.Conv2d(self.occupancy_embed_length, 32, 3, padding=1)),
("relu1", nn.ReLU(inplace=True)),
# shape = (15, 15)
("conv2", nn.Conv2d(32, 64, 3, stride=2)),
("relu2", nn.ReLU(inplace=True)),
# shape = (7, 7)
("conv3", nn.Conv2d(64, 128, 3, stride=2)),
("relu3", nn.ReLU(inplace=True)),
# shape = (3, 3)
("conv4", nn.Conv2d(128, final_cnn_channels, 3, stride=2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (1, 1); Stride doesn't matter above
]
)
)
# Vocab embed
self.talk_embeddings = nn.Embedding(num_talk_symbols, talk_embed_length)
self.reply_embeddings = nn.Embedding(num_reply_symbols, reply_embed_length)
self.talk_symbol_classifier = nn.Linear(state_repr_length, num_talk_symbols)
self.reply_symbol_classifier = nn.Linear(state_repr_length, num_reply_symbols)
# Agent embed
self.agent_num_embeddings = nn.Parameter(
torch.rand(self.num_agents, agent_num_embed_length)
)
# LSTM
self.lstm = nn.LSTM(
final_cnn_channels * 1 + agent_num_embed_length,
state_repr_length,
batch_first=True,
)
# Belief update MLP
state_and_talk_rep_len = state_repr_length + talk_embed_length * (
num_agents - 1
)
self.after_talk_mlp = nn.Sequential(
nn.Linear(state_and_talk_rep_len, state_and_talk_rep_len),
nn.ReLU(inplace=True),
nn.Linear(state_and_talk_rep_len, state_repr_length),
nn.ReLU(inplace=True),
)
state_and_reply_rep_len = state_repr_length + reply_embed_length * (
num_agents - 1
)
self.after_reply_mlp = nn.Sequential(
nn.Linear(state_and_reply_rep_len, state_and_reply_rep_len),
nn.ReLU(inplace=True),
nn.Linear(state_and_reply_rep_len, state_repr_length),
nn.ReLU(inplace=True),
)
if coordinate_actions:
# Randomization MLP
self.to_randomization_logits = nn.Sequential(
nn.Linear(self.num_agents * reply_embed_length, 64),
nn.ReLU(inplace=True),
nn.Linear(64, 64),
nn.ReLU(inplace=True),
nn.Linear(64, self.coordinate_actions_dim),
)
# self.marginal_linear_actor = nn.Linear(state_repr_length, self.num_outputs)
# self.marginal_linear_actor.weight.data = norm_col_init(
# self.marginal_linear_actor.weight.data, 0.01
# )
# self.marginal_linear_actor.bias.data.fill_(0)
# Linear actor
self.actor_linear = None
if coordinate_actions:
if separate_actor_weights:
self.actor_linear_list = nn.ModuleList(
[
nn.Linear(
state_repr_length,
self.num_outputs * self.coordinate_actions_dim,
)
for _ in range(2)
]
)
else:
self.actor_linear = nn.Linear(
state_repr_length, self.num_outputs * self.coordinate_actions_dim
)
else:
assert not separate_actor_weights
self.actor_linear = nn.Linear(state_repr_length, self.num_outputs)
if self.actor_linear is not None:
self.actor_linear.weight.data = norm_col_init(
self.actor_linear.weight.data, 0.01
)
self.actor_linear.bias.data.fill_(0)
else:
for al in self.actor_linear_list:
al.weight.data = norm_col_init(al.weight.data, 0.01)
al.bias.data.fill_(0)
# Linear critic
if self.central_critic:
self.critic_linear = nn.Linear(state_repr_length * self.num_agents, 1)
else:
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
self.talk_symbol_classifier.weight.data = norm_col_init(
self.talk_symbol_classifier.weight.data, 0.01
)
self.talk_symbol_classifier.bias.data.fill_(0)
self.reply_symbol_classifier.weight.data = norm_col_init(
self.reply_symbol_classifier.weight.data, 0.01
)
self.reply_symbol_classifier.bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs, 15, 15):
raise Exception("input to model is not as expected, check!")
inputs = inputs.float()
# inputs.shape == (self.num_agents, self.num_inputs, 15, 15)
inputs = inputs.permute(0, 2, 3, 1).contiguous()
original_shape = inputs.shape
# original_shape == (self.num_agents, 15, 15, self.num_inputs)
scaling_factor = torch.reciprocal(
(inputs == 1).sum(dim=3).unsqueeze(3).float() + 1e-3
)
# scaling_factor.shape == (2, 15, 15, 1)
inputs = inputs.view(-1, self.num_inputs)
inputs = inputs.matmul(self.occupancy_embeddings.weight)
inputs = inputs.view(
original_shape[0],
original_shape[1],
original_shape[2],
self.occupancy_embed_length,
)
x = torch.mul(inputs, scaling_factor.expand_as(inputs))
# x.shape == (self.num_agents, 15, 15, self.occupancy_embed_length)
x = x.permute(0, 3, 1, 2).contiguous()
# x.shape == (self.num_agents, self.occupancy_embed_length, 15, 15)
x = self.cnn(x)
# x.shape == (2, 256, 1, 1)
x = x.view(x.size(0), -1)
# x.shape = [num_agents, 256]
x = torch.cat((x, self.agent_num_embeddings), dim=1)
# x.shape = [num_agents, 256 + agent_num_embed_length]
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [num_agents, 1, state_repr_length]
# hidden[0].shape == [1, num_agents, state_repr_length]
# hidden[1].shape == [1, num_agents, state_repr_length]
x = x.squeeze(1)
# x.shape = [num_agents, state_repr_length]
talk_logits = self.talk_symbol_classifier(x)
talk_probs = F.softmax(talk_logits, dim=1)
# talk_probs.shape = [num_agents, num_talk_symbols]
talk_outputs = torch.mm(talk_probs, self.talk_embeddings.weight)
# talk_outputs.shape = [num_agents, talk_embed_length]
if not self.turn_off_communication:
talk_heard_per_agent = _unfold_communications(talk_outputs)
else:
talk_heard_per_agent = torch.cat(
[talk_outputs] * (self.num_agents - 1), dim=1
)
state_talk_repr = x + self.after_talk_mlp(
torch.cat((x, talk_heard_per_agent), dim=1)
)
        # state_talk_repr.shape = [num_agents, state_repr_length]
reply_logits = self.reply_symbol_classifier(state_talk_repr)
reply_probs = F.softmax(reply_logits, dim=1)
# reply_probs.shape = [num_agents, num_reply_symbols]
reply_outputs = torch.mm(reply_probs, self.reply_embeddings.weight)
# reply_outputs.shape = [num_agents, reply_embed_length]
if not self.turn_off_communication:
reply_heard_per_agent = _unfold_communications(reply_outputs)
else:
reply_heard_per_agent = torch.cat(
[reply_outputs] * (self.num_agents - 1), dim=1
)
state_talk_reply_repr = state_talk_repr + self.after_reply_mlp(
torch.cat((state_talk_repr, reply_heard_per_agent), dim=1)
)
# state_talk_reply_repr.shape = [num_agents, state_repr_length]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (state_talk_reply_repr.unsqueeze(0), hidden[1])
if self.central_critic:
value_all = self.critic_linear(
torch.cat(
[
state_talk_reply_repr,
_unfold_communications(state_talk_reply_repr),
],
dim=1,
)
)
else:
value_all = self.critic_linear(state_talk_reply_repr)
to_return = {
"value_all": value_all,
"hidden_all": hidden,
"talk_probs": talk_probs,
"reply_probs": reply_probs,
}
if self.coordinate_actions:
if self.num_agents != 2:
to_return["randomization_logits"] = self.to_randomization_logits(
reply_outputs.view(1, -1)
)
else:
to_return["randomization_logits"] = self.to_randomization_logits(
reply_heard_per_agent.view(1, -1)
)
if not self.separate_actor_weights:
logits = self.actor_linear(state_talk_reply_repr)
else:
logits = torch.cat(
[
linear(state_talk_reply_repr[i].unsqueeze(0))
for i, linear in enumerate(self.actor_linear_list)
],
dim=0,
)
# logits = self.actor_linear(
# state_talk_reply_repr
# ) + self.marginal_linear_actor(state_talk_reply_repr).unsqueeze(1).repeat(
# 1, self.coordinate_actions_dim, 1
# ).view(
# self.num_agents, self.num_outputs ** 2
# )
to_return["coordinated_logits"] = logits
else:
to_return["logit_all"] = self.actor_linear(state_talk_reply_repr)
return to_return
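

if __name__ == "__main__":
    # Illustrative smoke test (a minimal sketch, not part of the original
    # training pipeline). All constructor arguments below are assumed example
    # values chosen only so that the tensor shapes line up; the real settings
    # live in the experiment configurations elsewhere in this repository.
    model = A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(
        num_inputs=9,  # assumed number of occupancy channels per grid cell
        action_groups=(("MoveAhead", "RotateLeft", "RotateRight", "Pass"),),
        num_agents=2,
        state_repr_length=512,
        talk_embed_length=8,
        agent_num_embed_length=8,
        reply_embed_length=8,
        num_talk_symbols=2,
        num_reply_symbols=2,
        occupancy_embed_length=8,
        turn_off_communication=False,
        coordinate_actions=True,
    )
    # One (num_agents, num_inputs, 15, 15) observation; hidden=None lets the
    # LSTM default to a zero initial state. agent_rotations is unused here.
    dummy_inputs = torch.zeros(2, 9, 15, 15)
    out = model(dummy_inputs, hidden=None, agent_rotations=[0, 0])
    print(out["value_all"].shape)  # expected: torch.Size([2, 1])
    print(out["coordinated_logits"].shape)  # expected: torch.Size([2, 16])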
| cordial-sync-master | rl_multi_agent/models.py |
import itertools
import math
import warnings
from typing import Tuple, Dict, List, Any, Union
import matplotlib as mpl
import constants
from rl_ai2thor.ai2thor_environment import AI2ThorEnvironmentWithGraph
from rl_ai2thor.ai2thor_episodes import MultiAgentAI2ThorEpisode
from rl_ai2thor.ai2thor_utils import manhattan_dists_between_positions
from .furnmove_episodes import are_actions_coordinated, coordination_type_tensor
mpl.use("Agg", force=False)
class JointNavigationEpisode(MultiAgentAI2ThorEpisode):
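    """Episode in which agents navigate until the goal object is visible to
    every agent simultaneously.

    Provides goal-visibility queries, shortest-path expert actions toward the
    nearest target state, and an oracle episode length.
    """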
def __init__(
self,
env: AI2ThorEnvironmentWithGraph,
task_data: Dict[str, Any],
max_steps: int,
**kwargs
):
super(JointNavigationEpisode, self).__init__(
env=env, task_data=task_data, max_steps=max_steps, **kwargs
)
self.initial_agent_metadata = env.get_all_agent_metadata()
self.target_key_groups = task_data["target_key_groups"]
self.object_id = task_data["goal_obj_id"]
self.target_keys_set = set(o for group in self.target_key_groups for o in group)
self.oracle_length = self.calculate_oracle_length()
@classmethod
def class_available_action_groups(cls, **kwargs) -> Tuple[Tuple[str, ...], ...]:
actions = ("MoveAhead", "RotateLeft", "RotateRight", "Pass")
return (actions,)
def _is_goal_object_visible(self, agent_id: int) -> bool:
return self.task_data["goal_obj_id"] in [
o["objectId"] for o in self.environment.visible_objects(agent_id=agent_id)
]
def goal_visibility(self) -> List[bool]:
return [
self._is_goal_object_visible(agentId)
for agentId in range(self.environment.num_agents)
]
def goal_visibility_int(self) -> List[int]:
return [
int(self._is_goal_object_visible(agentId))
for agentId in range(self.environment.num_agents)
]
def goal_visible_to_all(self) -> bool:
return all(self.goal_visibility())
def info(self):
return {
**super(JointNavigationEpisode, self).info(),
"accuracy": 1 * self.goal_visible_to_all(),
"goal_visible_to_one_agent": 1 * (sum(self.goal_visibility()) == 1),
}
def _step(self, action_as_int: int, agent_id: int) -> Dict[str, Any]:
action = self.available_actions[action_as_int]
action_dict = {"action": action, "agentId": agent_id}
self.environment.step(action_dict)
return {
"action": action_as_int,
"action_success": self.environment.last_event.events[agent_id].metadata[
"lastActionSuccess"
],
}
def _closest_target(self, source_key):
closest_target = None
closest_steps = float("inf")
for target_key in self.target_keys_set:
nsteps = self.environment.shortest_path_length(
source_state_key=source_key, goal_state_key=target_key
)
if closest_target is None or nsteps < closest_steps:
closest_steps = nsteps
closest_target = target_key
return closest_target
def next_expert_action(self) -> Tuple[Union[int, None], ...]:
expert_actions = []
is_goal_visible = self.goal_visibility()
for agent_id in range(self.environment.num_agents):
source_key = self.environment.get_key(
self.environment.last_event.events[agent_id].metadata["agent"]
)
if is_goal_visible[agent_id] or source_key in self.target_keys_set:
action = "Pass"
else:
action = self.environment.shortest_path_next_action(
source_key, self._closest_target(source_key)
)
expert_actions.append(self.available_actions.index(action))
return tuple(expert_actions)
def reached_terminal_state(self) -> bool:
return self.goal_visible_to_all()
def calculate_oracle_length(self):
path_lengths = []
for agent_id in range(self.environment.num_agents):
source_state_key = self.environment.get_key(
self.initial_agent_metadata[agent_id]
)
# goal_state_key = self.environment.get_key(self.target_metadata[agent_id])
path_length_single = self.environment.shortest_path_length(
source_state_key, self._closest_target(source_state_key)
)
path_lengths.append(path_length_single)
return max(path_lengths)
class FurnLiftEpisode(JointNavigationEpisode):
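    """Joint navigation episode extended with a coordinated "Pickup" action.

    The episode succeeds only when the goal object is visible to all agents
    and every agent selects Pickup in the same step, at which point the object
    is teleported upward to simulate lifting.
    """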
def __init__(
self,
env: AI2ThorEnvironmentWithGraph,
task_data: Dict[str, Any],
max_steps: int,
**kwargs
):
super().__init__(env=env, task_data=task_data, max_steps=max_steps, **kwargs)
self._item_successfully_picked_up = False
self._jointly_visible_but_not_picked = 0
self._picked_but_not_jointly_visible = 0
@classmethod
def class_available_action_groups(cls, **kwargs) -> Tuple[Tuple[str, ...], ...]:
actions = ("MoveAhead", "RotateLeft", "RotateRight", "Pass", "Pickup")
return (actions,)
def info(self):
return {
**super(JointNavigationEpisode, self).info(),
"accuracy": 1 * self._item_successfully_picked_up,
"jointly_visible_but_not_picked": self._jointly_visible_but_not_picked,
"picked_but_not_jointly_visible": self._picked_but_not_jointly_visible,
}
def multi_step(self, actions_as_ints: Tuple[int, ...]) -> List[Dict[str, Any]]:
assert not self.is_paused() and not self.is_complete()
pickup_index = self.available_actions.index("Pickup")
visibility = self.goal_visibility()
visible_to_all = all(visibility)
agent_tried_pickup = [i == pickup_index for i in actions_as_ints]
all_pick_up = all(agent_tried_pickup)
self._jointly_visible_but_not_picked += visible_to_all * (not all_pick_up)
if visible_to_all and all(i == pickup_index for i in actions_as_ints):
for i in range(self.environment.num_agents):
self._increment_num_steps_taken_in_episode()
step_results = [
{
"action": pickup_index,
"action_success": True,
"goal_visible": True,
"pickup_action_taken": True,
}
for _ in range(self.environment.num_agents)
]
object = self.environment.get_object_by_id(self.object_id, agent_id=0)
self.environment.step(
{
"action": "TeleportObject",
**object["position"],
"objectId": self.object_id,
"y": object["position"]["y"] + 1.0,
"rotation": object["rotation"],
"agentId": 0,
}
)
self._item_successfully_picked_up = True
else:
step_results = []
for i in range(self._env.num_agents):
self._increment_num_steps_taken_in_episode()
step_results.append(self._step(actions_as_ints[i], agent_id=i))
step_results[-1]["goal_visible"] = visibility[i]
step_results[-1]["pickup_action_taken"] = agent_tried_pickup[i]
return step_results
def _step(self, action_as_int: int, agent_id: int) -> Dict[str, Any]:
action = self.available_actions[action_as_int]
if action == "Pickup":
self._picked_but_not_jointly_visible += 1
metadata = self.environment.last_event.events[agent_id].metadata
metadata["lastAction"] = "Pickup"
metadata["lastActionSuccess"] = False
else:
action_dict = {"action": action, "agentId": agent_id}
self.environment.step(action_dict)
return {
"action": action_as_int,
"action_success": self.environment.last_event.events[agent_id].metadata[
"lastActionSuccess"
],
}
def next_expert_action(self) -> Tuple[Union[int, None], ...]:
expert_actions = []
is_goal_visible = self.goal_visibility()
if all(is_goal_visible):
return tuple(
[self.available_actions.index("Pickup")] * self.environment.num_agents
)
for agent_id in range(self.environment.num_agents):
source_key = self.environment.get_key(
self.environment.last_event.events[agent_id].metadata["agent"]
)
if is_goal_visible[agent_id] or source_key in self.target_keys_set:
action = "Pass"
else:
action = self.environment.shortest_path_next_action(
source_key, self._closest_target(source_key)
)
expert_actions.append(self.available_actions.index(action))
return tuple(expert_actions)
def reached_terminal_state(self) -> bool:
return self._item_successfully_picked_up
class FurnLiftNApartStateEpisode(FurnLiftEpisode):
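    """FurnLift episode that also requires agents to be sufficiently far apart
    (in Manhattan distance) before a joint Pickup can succeed.

    Adds step and failed-action penalties, detailed pickup bookkeeping, and a
    Manhattan-distance-based SPL to the episode `info`.
    """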
def __init__(
self,
env: AI2ThorEnvironmentWithGraph,
task_data: Dict[str, Any],
max_steps: int,
**kwargs
):
super().__init__(env=env, task_data=task_data, max_steps=max_steps, **kwargs)
self._min_dist_between_agents_to_pickup = kwargs[
"min_dist_between_agents_to_pickup"
]
self._pickupable_but_not_picked = 0
self._picked_but_not_pickupable = 0
self._picked_but_not_pickupable_distance = 0
self._picked_but_not_pickupable_visibility = 0
self.total_reward = 0.0
self.coordinated_action_checker = are_actions_coordinated
goal_object = self.environment.get_object_by_id(self.object_id, agent_id=0)
self.object_location = dict(
x=round(goal_object["position"]["x"], 2),
z=round(goal_object["position"]["z"], 2),
)
# Initial agent locations, to calculate the expert path lengths
self._initial_agent_locations = [
self.environment.get_agent_location(i)
for i in range(self.environment.num_agents)
]
@staticmethod
def location_l1_dist(loc1, loc2):
return abs(loc1["x"] - loc2["x"]) + abs(loc1["z"] - loc2["z"])
def info(self):
nagents = self.environment.num_agents
_final_l1_dist = [
self.location_l1_dist(
self.object_location, self.environment.get_agent_location(i)
)
for i in range(nagents)
]
mean_final_agent_l1_distance_from_target = round(
sum(_final_l1_dist) / len(_final_l1_dist), 2
)
_init_l1_dist = [
self.location_l1_dist(self.object_location, ag_loc)
for ag_loc in self._initial_agent_locations
]
mean_initial_agent_manhattan_steps_from_target = (
sum(_init_l1_dist) / len(_init_l1_dist)
) / self.environment.grid_size
return {
**super(JointNavigationEpisode, self).info(),
"accuracy": 1 * self._item_successfully_picked_up,
"pickupable_but_not_picked": self._pickupable_but_not_picked,
"picked_but_not_pickupable": self._picked_but_not_pickupable,
"picked_but_not_pickupable_distance": self._picked_but_not_pickupable_distance,
"picked_but_not_pickupable_visibility": self._picked_but_not_pickupable_visibility,
"reward": self.total_reward,
"initial_manhattan_steps": mean_initial_agent_manhattan_steps_from_target,
"final_manhattan_distance_from_target": mean_final_agent_l1_distance_from_target,
"spl_manhattan": int(self._item_successfully_picked_up)
* (
(mean_initial_agent_manhattan_steps_from_target + 0.0001)
/ (
max(
mean_initial_agent_manhattan_steps_from_target,
(self.num_steps_taken_in_episode() / nagents),
)
+ 0.0001
)
),
}
def manhattan_dists_between_agents(self):
return manhattan_dists_between_positions(
[
self.environment.get_agent_location(i)
for i in range(self.environment.num_agents)
],
self.environment.grid_size,
)
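    # A joint pickup succeeds only if (a) the object is visible to every agent, (b)
    # every pair of agents is sufficiently far apart, and (c) every agent chose
    # "Pickup" on this step. On success each agent receives a reward of 1.0 and the
    # object is teleported 1.0 units upward; otherwise the actions are executed
    # individually and the failure counters and penalties below apply.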
def multi_step(self, actions_as_ints: Tuple[int, ...]) -> List[Dict[str, Any]]:
assert not self.is_paused() and not self.is_complete()
pickup_index = self.available_actions.index("Pickup")
visibility = self.goal_visibility()
visible_to_all = all(visibility)
agent_tried_pickup = [i == pickup_index for i in actions_as_ints]
all_pick_up = all(agent_tried_pickup)
pairwise_distances = self.manhattan_dists_between_agents()
sufficiently_far = [
all(x >= self._min_dist_between_agents_to_pickup for x in dists)
for dists in pairwise_distances
]
all_sufficiently_far = all(sufficiently_far)
self._pickupable_but_not_picked += (
visible_to_all * all_sufficiently_far * (not all_pick_up)
)
if visible_to_all and all_sufficiently_far and all_pick_up:
for i in range(self.environment.num_agents):
self._increment_num_steps_taken_in_episode()
step_results = [
{
"action": pickup_index,
"action_success": True,
"goal_visible": True,
"pickup_action_taken": True,
"mutually_distant": True,
"reward": 1.0,
}
for _ in range(self.environment.num_agents)
]
            goal_object = self.environment.get_object_by_id(self.object_id, agent_id=0)
            self.environment.step(
                {
                    "action": "TeleportObject",
                    **goal_object["position"],
                    "objectId": self.object_id,
                    "y": goal_object["position"]["y"] + 1.0,
                    "rotation": goal_object["rotation"],
                    "agentId": 0,
                }
            )
self._item_successfully_picked_up = True
else:
step_results = []
            for i in range(self.environment.num_agents):
self._increment_num_steps_taken_in_episode()
step_results.append(self._step(actions_as_ints[i], agent_id=i))
step_results[-1]["goal_visible"] = visibility[i]
step_results[-1]["pickup_action_taken"] = agent_tried_pickup[i]
self._picked_but_not_pickupable += agent_tried_pickup[i]
self._picked_but_not_pickupable_distance += (
agent_tried_pickup[i] and not all_sufficiently_far
)
self._picked_but_not_pickupable_visibility += (
agent_tried_pickup[i] and not visible_to_all
)
step_results[-1]["mutually_distant"] = sufficiently_far[i]
self.total_reward += sum(sr["reward"] for sr in step_results)
return step_results
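    # Reward shaping for a single agent's step: a constant step penalty, an extra
    # penalty when the chosen action fails, and a larger extra penalty for issuing
    # "Pickup" when the joint pickup conditions were not met (in that case the Pickup
    # is recorded as a failed action without touching the controller).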
def _step(self, action_as_int: int, agent_id: int) -> Dict[str, Any]:
        STEP_PENALTY = -0.01
        FAILED_ACTION_PENALTY = -0.02
        reward = STEP_PENALTY
action = self.available_actions[action_as_int]
if action == "Pickup":
self._picked_but_not_jointly_visible += 1
metadata = self.environment.last_event.events[agent_id].metadata
metadata["lastAction"] = "Pickup"
metadata["lastActionSuccess"] = False
reward += -0.1
else:
action_dict = {"action": action, "agentId": agent_id}
self.environment.step(action_dict)
action_success = self.environment.last_event.events[agent_id].metadata[
"lastActionSuccess"
]
if not action_success:
reward += FAILED_ACTION_PENALTY
return {
"reward": reward,
"action": action_as_int,
"action_success": action_success,
}
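    # Expert policy: (1) prune the currently occupied target group if it turns out to
    # be invalid; (2) if the joint pickup conditions hold, every agent picks up;
    # (3) otherwise assign each agent to a target location (see the search below) and
    # step it along the shortest path toward its assigned target, passing once it is
    # already there. Returns None if no valid target groups remain.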
def next_expert_action(self) -> Tuple[Union[int, None], ...]:
expert_actions = []
visibility = self.goal_visibility()
visible_to_all = all(visibility)
pairwise_distances = self.manhattan_dists_between_agents()
sufficiently_far = [
all(x >= self._min_dist_between_agents_to_pickup for x in dists)
for dists in pairwise_distances
]
all_sufficiently_far = all(sufficiently_far)
agent_keys = [
self.environment.get_key(
self.environment.last_event.events[agent_id].metadata["agent"]
)
for agent_id in range(self.environment.num_agents)
]
        # If the agents have reached an expert target configuration, but that
        # configuration turns out to be invalid (as can happen if, for example, two
        # agents intersect and block one another's vision), then remove the
        # corresponding key tuple from the set of good target groups and have the
        # agents move somewhere else.
if all(k in self.target_keys_set for k in agent_keys):
agent_key_tuple = tuple(sorted(agent_keys))
if agent_key_tuple in self.target_key_groups and (
not visible_to_all or not all_sufficiently_far
):
self.target_key_groups.remove(agent_key_tuple)
if visible_to_all and all_sufficiently_far:
return tuple(
[self.available_actions.index("Pickup")] * self.environment.num_agents
)
if len(self.target_key_groups) == 0:
return None
_dist_cache = {}
def agent_to_dist_to_target(agent_id, target_key):
key = (agent_id, target_key)
if key not in _dist_cache:
_dist_cache[key] = self.environment.shortest_path_length(
source_state_key=agent_keys[agent_id], goal_state_key=target_key
)
return _dist_cache[key]
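        # Brute-force the agent-to-target assignment: enumerate every ordering of each
        # candidate target group and keep the one minimizing the summed shortest-path
        # lengths. This is O(n!) in the number of agents, which is assumed to be small
        # here, so the enumeration is cheap.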
best_ordered_target_group = None
best_path_length = None
for good_target_group in self.target_key_groups:
for ordered_target_group in itertools.permutations(
good_target_group, len(good_target_group)
):
path_length = sum(
agent_to_dist_to_target(agent_id, target_key)
for agent_id, target_key in enumerate(ordered_target_group)
)
if best_ordered_target_group is None or best_path_length > path_length:
best_path_length = path_length
best_ordered_target_group = ordered_target_group
for agent_id in range(self.environment.num_agents):
target_key = best_ordered_target_group[agent_id]
if agent_keys[agent_id] == target_key:
action = "Pass"
else:
action = self.environment.shortest_path_next_action(
agent_keys[agent_id], target_key
)
expert_actions.append(self.available_actions.index(action))
return tuple(expert_actions)
def reached_terminal_state(self) -> bool:
return self._item_successfully_picked_up
def coordination_type_tensor(self):
return coordination_type_tensor(
self.environment, self.available_actions, self.coordinated_action_checker
)
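# Same task as FurnLiftNApartStateEpisode, with extra logging: the first step at which
# each agent saw the target, the best expert path lengths from the agents' initial
# locations, and Euclidean (L2) distances from the target.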
class FurnLiftNApartStateLoggingEpisode(FurnLiftNApartStateEpisode):
def __init__(
self,
env: AI2ThorEnvironmentWithGraph,
task_data: Dict[str, Any],
max_steps: int,
**kwargs
):
super().__init__(env=env, task_data=task_data, max_steps=max_steps, **kwargs)
# Extra logging of step number at which an agent first saw the target
self._first_view_of_target = [self.max_steps + 1] * self.environment.num_agents
# Initial agent locations, to calculate the expert path lengths
self._initial_agent_locations = [
self.environment.get_agent_location(i)
for i in range(self.environment.num_agents)
]
self._initial_agent_keys = [
self.environment.get_key(ag_loc) for ag_loc in self._initial_agent_locations
]
goal_object = self.environment.get_object_by_id(self.object_id, agent_id=0)
self.object_location = dict(
x=round(goal_object["position"]["x"], 2),
z=round(goal_object["position"]["z"], 2),
)
@staticmethod
def location_l2_dist(loc1, loc2):
return math.sqrt((loc1["x"] - loc2["x"]) ** 2 + (loc1["z"] - loc2["z"]) ** 2)
def info(self):
final_agent_distance_from_target = [
self.location_l2_dist(
self.object_location, self.environment.get_agent_location(i)
)
for i in range(self.environment.num_agents)
]
_dist_cache = {}
def agent_to_dist_to_target(agent_id, target_key):
key = (agent_id, target_key)
if key not in _dist_cache:
_dist_cache[key] = self.environment.shortest_path_length(
source_state_key=self._initial_agent_keys[agent_id],
goal_state_key=target_key,
)
return _dist_cache[key]
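        # Mirror the expert's assignment search, but from the agents' *initial*
        # locations: over all valid target groups and agent-to-target orderings, find
        # the ordering minimizing the summed shortest-path length (logged below as
        # best_path_to_target_length and path_to_target_lengths).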
best_path_lengths = None
best_path_length = None
for good_target_group in self.target_key_groups:
for ordered_target_group in itertools.permutations(
good_target_group, len(good_target_group)
):
path_lengths = tuple(
agent_to_dist_to_target(agent_id, target_key)
for agent_id, target_key in enumerate(ordered_target_group)
)
path_length = sum(path_lengths)
if best_path_lengths is None or best_path_length > path_length:
                    best_path_length = path_length
best_path_lengths = path_lengths
initial_agent_distance_from_target = [
self.location_l2_dist(self.object_location, ag_loc)
for ag_loc in self._initial_agent_locations
]
return {
**super(JointNavigationEpisode, self).info(),
"accuracy": 1 * self._item_successfully_picked_up,
"pickupable_but_not_picked": self._pickupable_but_not_picked,
"picked_but_not_pickupable": self._picked_but_not_pickupable,
"picked_but_not_pickupable_distance": self._picked_but_not_pickupable_distance,
"picked_but_not_pickupable_visibility": self._picked_but_not_pickupable_visibility,
"reward": self.total_reward,
"first_view_of_target": self._first_view_of_target,
"final_distance_between_agents": self.manhattan_dists_between_agents(),
"best_path_to_target_length": best_path_length,
"path_to_target_lengths": best_path_lengths,
"initial_agent_distance_from_target": initial_agent_distance_from_target,
"final_agent_distance_from_target": final_agent_distance_from_target,
"object_location": self.object_location,
"scene_name": self.environment.scene_name,
}
def multi_step(self, actions_as_ints: Tuple[int, ...]) -> List[Dict[str, Any]]:
step_results = super(FurnLiftNApartStateLoggingEpisode, self).multi_step(
actions_as_ints=actions_as_ints
)
# Extra logging of step number at which an agent first saw the target
visibility = self.goal_visibility()
for i in range(self.environment.num_agents):
if visibility[i]:
self._first_view_of_target[i] = min(
self._first_view_of_target[i], self.num_steps_taken_in_episode()
)
return step_results
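# Variant whose per-agent observations are egocentric crops of a top-down occupancy
# grid (the "frame" entry below); cells occupied by the goal object are marked with
# constants.GOAL_OBJ_SYM.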
class FurnLiftGridStateEpisode(FurnLiftNApartStateEpisode):
def __init__(
self,
env: AI2ThorEnvironmentWithGraph,
task_data: Dict[str, Any],
max_steps: int,
**kwargs
):
super().__init__(env=env, task_data=task_data, max_steps=max_steps, **kwargs)
self.object_points_set = kwargs["object_points_set"]
def states_for_agents(self) -> List[Dict[str, Any]]:
        # TODO: Getting the current occupancy matrix like this on every iteration is,
        #   in principle, an expensive operation as it requires doing some THOR actions.
        #   We could track state changes without doing this, but that requires some
        #   annoying bookkeeping.
# Reachable, unreachable and agent locations are marked
matrix, point_to_element_map = self.environment.get_current_occupancy_matrix(
padding=0.5, use_initially_reachable_points_matrix=True
)
# Mark goal object locations
nskipped = 0
for point_tuple in self.object_points_set:
if point_tuple not in point_to_element_map:
nskipped += 1
continue
row, col = point_to_element_map[point_tuple]
matrix[row, col] = constants.GOAL_OBJ_SYM
if nskipped == len(self.object_points_set):
raise RuntimeError(
"Skipped all object points in scene {}.".format(
self.environment.scene_name
)
)
elif nskipped > 10:
warnings.warn(
"Skipping many object points ({}) in scene {}.".format(
nskipped, self.environment.scene_name
)
)
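        # Each agent observes an egocentric crop of the occupancy matrix, sized by
        # visibility_distance // grid_size (passed twice, presumably for the two
        # in-plane axes), together with its last action and whether it succeeded.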
states = []
for agent_id in range(self.environment.num_agents):
matrix_ego_per_agent = self.environment.current_matrix_frame(
agent_id,
matrix,
point_to_element_map,
int(self.environment.visibility_distance // self.environment.grid_size),
int(self.environment.visibility_distance // self.environment.grid_size),
)
last_action = (
None if self._last_actions is None else self._last_actions[agent_id]
)
last_action_success = (
None
if self._last_actions_success is None
else self._last_actions_success[agent_id]
)
states.append(
{
"frame": matrix_ego_per_agent,
"last_action": last_action,
"last_action_success": last_action_success,
}
)
return states
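# Illustrative driver sketch (hypothetical; assumes `env` is an already-initialized
# AI2ThorEnvironmentWithGraph and `task_data` describes a FurnLift scene; the max_steps
# and min-distance values are placeholders):
#
#     episode = FurnLiftNApartStateEpisode(
#         env=env,
#         task_data=task_data,
#         max_steps=250,
#         min_dist_between_agents_to_pickup=8,
#     )
#     while not (episode.is_complete() or episode.reached_terminal_state()):
#         actions = episode.next_expert_action()
#         if actions is None:
#             break
#         episode.multi_step(actions)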
| cordial-sync-master | rl_multi_agent/furnlift_episodes.py |
"import glob\nimport inspect\nimport json\nimport math\nimport os\nimport time\nimport warnings\nfro(...TRUNCATED) | cordial-sync-master | rl_multi_agent/furnmove_utils.py |