python_code | repo_name | file_path
---|---|---|
# encoding: utf-8
# the following try/except block will make the custom check compatible with any Agent version
try:
# first, try to import the base class from old versions of the Agent...
from checks import AgentCheck
except ImportError:
# ...if the above failed, the check is running in Agent version 6 or later
from datadog_checks.checks import AgentCheck
# psutil
import psutil
# pynvml
import pynvml
__version__ = '0.1.5'
__author__ = 'Takashi NAGAI, Alejandro Ferrari'
# These values are not exposed through Pynvml, but are reported in the throttle
# reasons. The reason values are defined in
# https://github.com/NVIDIA/gpu-monitoring-tools/blob/master/bindings/go/nvml/nvml.h
# Indicates that the clocks are throttled because the GPU is part of a sync
# boost group, which syncs the clocks to the minimum clocks across the group.
# Corresponds to nvmlClocksThrottleReasonSyncBoost.
GPU_THROTTLE_SYNCBOOST = 0x10
# Indicates that the GPU core or memory temperature is above max. Corresponds to
# nvmlClocksThrottleReasonSwThermalSlowdown.
GPU_THROTTLE_THERMAL_SLOWDOWN_SOFTWARE = 0x20
# Indicates that the clocks are throttled by 50% or more due to very high
# temperature. Corresponds to nvmlClocksThrottleReasonHwThermalSlowdown.
GPU_THROTTLE_THERMAL_SLOWDOWN_HARDWARE = 0x40
# Indicates that the clocks are throttled by 50% or more by the power supply.
# Corresponds to nvmlClocksThrottleReasonHwPowerBrakeSlowdown.
GPU_THROTTLE_POWER_BRAKE_SLOWDOWN_HARDWARE = 0x80
# Indicates that the clocks are throttled by the current display settings.
# Corresponds to nvmlClocksThrottleReasonDisplayClockSetting.
GPU_THROTTLE_DISPLAY_CLOCKS_SETTINGS = 0x100
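# Illustrative sketch (not part of the original check): each throttle reason is a
# single bit in the mask returned by nvmlDeviceGetCurrentClocksThrottleReasons, so a
# per-reason 0/1 value can be extracted as below; the gauge calls further down do the
# same thing inline via "(throttle_reasons & MASK) / MASK".
def _bit_flag(reasons, mask):
    # e.g. _bit_flag(0x50, GPU_THROTTLE_SYNCBOOST) == 1
    #      _bit_flag(0x50, GPU_THROTTLE_THERMAL_SLOWDOWN_SOFTWARE) == 0
    return 1 if (reasons & mask) else 0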
class NvmlCheck(AgentCheck):
def _dict2list(self, tags=None):
tags = tags if tags is not None else {}
return ["{k}:{v}".format(k=k, v=v) for k, v in list(tags.items())]
def check(self, instance):
pynvml.nvmlInit()
msg_list = []
try:
deviceCount = pynvml.nvmlDeviceGetCount()
except pynvml.NVMLError:
deviceCount = 0
# Number of active GPUs
self.gauge('nvml.gpus.number', deviceCount)
for device_id in range(deviceCount):
handle = pynvml.nvmlDeviceGetHandleByIndex(device_id)
name = pynvml.nvmlDeviceGetName(handle)
tags = dict(name="{}-{}".format(name, device_id))
d_tags = self._dict2list(tags)
# temperature info
try:
temp = pynvml.nvmlDeviceGetTemperature(
handle, pynvml.NVML_TEMPERATURE_GPU)
self.gauge('nvml.temp.', temp, tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append('nvmlDeviceGetTemperature:{}'.format(err))
# power info
try:
pwr = pynvml.nvmlDeviceGetPowerUsage(handle) // 1000
self.gauge('nvml.power.', pwr, tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append('nvmlDeviceGetPowerUsage:{}'.format(err))
# fan info
try:
fan = pynvml.nvmlDeviceGetFanSpeed(handle)
self.gauge('nvml.fan.', fan, tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append('nvmlDeviceGetFanSpeed:{}'.format(err))
# memory info
try:
mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
self.gauge('nvml.mem.total', mem.total, tags=d_tags)
self.gauge('nvml.mem.used', mem.used, tags=d_tags)
self.gauge('nvml.mem.free', mem.free, tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append('nvmlDeviceGetMemoryInfo:{}'.format(err))
# utilization GPU/Memory info
try:
util_rate = pynvml.nvmlDeviceGetUtilizationRates(handle)
self.gauge('nvml.util.gpu', util_rate.gpu, tags=d_tags)
self.gauge('nvml.util.memory', util_rate.memory, tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append(
'nvmlDeviceGetUtilizationRates:{}'.format(err))
# utilization Encoder info
try:
util_encoder = pynvml.nvmlDeviceGetEncoderUtilization(handle)
self.log.debug('nvml.util.encoder %s' % int(util_encoder[0]))
self.gauge('nvml.util.encoder', int(
util_encoder[0]), tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append(
'nvmlDeviceGetEncoderUtilization:{}'.format(err))
# utilization Decoder info
try:
util_decoder = pynvml.nvmlDeviceGetDecoderUtilization(handle)
self.log.debug('nvml.util.decoder %s' % int(util_decoder[0]))
self.gauge('nvml.util.decoder', int(
util_decoder[0]), tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append(
'nvmlDeviceGetDecoderUtilization:{}'.format(err))
# Compute running processes
try:
cps = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
for ps in cps:
p_tags = tags.copy()
p_tags['pid'] = ps.pid
p_tags['name'] = pynvml.nvmlSystemGetProcessName(ps.pid)
p_tags = self._dict2list(p_tags)
self.gauge('nvml.process.used_gpu_memory',
ps.usedGpuMemory, tags=p_tags)
except pynvml.NVMLError as err:
msg_list.append(
'nvmlDeviceGetComputeRunningProcesses:{}'.format(err))
# Clocks throttling info
# Divide by the mask so that the value is either 0 or 1 per GPU
try:
throttle_reasons = (
pynvml.nvmlDeviceGetCurrentClocksThrottleReasons(handle))
self.gauge('nvml.throttle.appsettings', (throttle_reasons &
pynvml.nvmlClocksThrottleReasonApplicationsClocksSetting) /
pynvml.nvmlClocksThrottleReasonApplicationsClocksSetting,
tags=d_tags)
self.gauge('nvml.throttle.display', (throttle_reasons &
GPU_THROTTLE_DISPLAY_CLOCKS_SETTINGS) /
GPU_THROTTLE_DISPLAY_CLOCKS_SETTINGS,
tags=d_tags)
self.gauge('nvml.throttle.hardware', (throttle_reasons &
pynvml.nvmlClocksThrottleReasonHwSlowdown) /
pynvml.nvmlClocksThrottleReasonHwSlowdown,
tags=d_tags)
self.gauge('nvml.throttle.idle', (throttle_reasons &
pynvml.nvmlClocksThrottleReasonGpuIdle) /
pynvml.nvmlClocksThrottleReasonGpuIdle,
tags=d_tags)
self.gauge('nvml.throttle.power.hardware', (throttle_reasons &
GPU_THROTTLE_POWER_BRAKE_SLOWDOWN_HARDWARE) /
GPU_THROTTLE_POWER_BRAKE_SLOWDOWN_HARDWARE,
tags=d_tags)
self.gauge('nvml.throttle.power.software', (throttle_reasons &
pynvml.nvmlClocksThrottleReasonSwPowerCap) /
pynvml.nvmlClocksThrottleReasonSwPowerCap,
tags=d_tags)
self.gauge('nvml.throttle.syncboost', (throttle_reasons &
GPU_THROTTLE_SYNCBOOST) / GPU_THROTTLE_SYNCBOOST,
tags=d_tags)
self.gauge('nvml.throttle.temp.hardware', (throttle_reasons &
GPU_THROTTLE_THERMAL_SLOWDOWN_HARDWARE) /
GPU_THROTTLE_THERMAL_SLOWDOWN_HARDWARE,
tags=d_tags)
self.gauge('nvml.throttle.temp.software', (throttle_reasons &
GPU_THROTTLE_THERMAL_SLOWDOWN_SOFTWARE) /
GPU_THROTTLE_THERMAL_SLOWDOWN_SOFTWARE,
tags=d_tags)
self.gauge('nvml.throttle.unknown', (throttle_reasons &
pynvml.nvmlClocksThrottleReasonUnknown) /
pynvml.nvmlClocksThrottleReasonUnknown,
tags=d_tags)
except pynvml.NVMLError as err:
msg_list.append(
'nvmlDeviceGetCurrentClocksThrottleReasons:{}'.format(err))
if msg_list:
status = AgentCheck.CRITICAL
msg = ','.join(msg_list)
else:
status = AgentCheck.OK
msg = 'Ok'
pynvml.nvmlShutdown()
self.service_check('nvml.check', status, message=msg)
| datadog_nvml-master | nvml.py |
"""Manages constants required by multiple files."""
import getpass
import os
import platform
import socket
from pathlib import Path
def make_scene_name(type, num):
if type == "":
return "FloorPlan" + str(num) + "_physics"
elif num < 10:
return "FloorPlan" + type + "0" + str(num) + "_physics"
else:
return "FloorPlan" + type + str(num) + "_physics"
SCENES_NAMES_SPLIT_BY_TYPE = [
[make_scene_name(j, i) for i in range(1, 31)] for j in ["", "2", "3", "4"]
]
SCENE_NAMES = sum(SCENES_NAMES_SPLIT_BY_TYPE, [])
TRAIN_SCENE_NAMES = [
SCENES_NAMES_SPLIT_BY_TYPE[j][i] for j in range(3) for i in range(20)
]
VALID_SCENE_NAMES = [
SCENES_NAMES_SPLIT_BY_TYPE[j][i] for j in range(3) for i in range(20, 25)
]
TEST_SCENE_NAMES = [
SCENES_NAMES_SPLIT_BY_TYPE[j][i] for j in range(3) for i in range(25, 30)
] + SCENES_NAMES_SPLIT_BY_TYPE[3]
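# Illustrative note (not in the original file): with 30 scenes per type, this split
# yields 60 training, 15 validation, and 45 test scenes; the test split additionally
# contains all 30 scenes of the fourth type.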
VISIBILITY_DISTANCE = 1.25
FOV = 90.0
PICK_UP_HEIGHT = 1.0
UNREACHABLE_SYM = 0
REACHABLE_SYM = 1
AGENT_SYM = 2
NO_INFO_SYM = 3
GOAL_OBJ_SYM = 4
VISITED_SYM = 5
AGENT_SELF_0 = 6
AGENT_SELF_90 = 7
AGENT_SELF_180 = 8
AGENT_SELF_270 = 9
AGENT_OTHER_0 = 10
AGENT_OTHER_90 = 11
AGENT_OTHER_180 = 12
AGENT_OTHER_270 = 13
# Distance that qualifies an episode as easy or medium
# For more details see `create_table` method in
# `rl_multi_agent/analysis/summarize_furnlift_eval_results.py`.
# array([ 2., 11., 17., 45.]) == [0, 1/3, 2/3, 1] percentiles of initial manhat distances
EASY_MAX = 10
MED_MAX = 17
def rotate_tuple_90_clockwise(t):
return (t[1], -t[0])
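# Illustrative note (not in the original file): rotate_tuple_90_clockwise((0.25, 0.0))
# returns (0.0, -0.25), and applying the rotation four times returns the original tuple.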
TELEVISION_ROT_0_POINTS = (
[(0.25 * i, 0.25) for i in [-1, 0, 1]]
+ [(0.25 * i, 0) for i in [-2, -1, 0, 1, 2]]
+ [(0.25 * i, -0.25) for i in [-2, -1, 0, 1, 2]]
)
TELEVISION_ROT_90_POINTS = [
rotate_tuple_90_clockwise(t) for t in TELEVISION_ROT_0_POINTS
]
TELEVISION_ROT_180_POINTS = [
rotate_tuple_90_clockwise(t) for t in TELEVISION_ROT_90_POINTS
]
TELEVISION_ROT_270_POINTS = [
rotate_tuple_90_clockwise(t) for t in TELEVISION_ROT_180_POINTS
]
TELEVISION_ROTATION_TO_OCCUPATIONS = {
0: TELEVISION_ROT_0_POINTS,
90: TELEVISION_ROT_90_POINTS,
180: TELEVISION_ROT_180_POINTS,
270: TELEVISION_ROT_270_POINTS,
}
TV_STAND_ROT_0_POINTS = [
(0.25 * i, 0.25 * j) for i in range(-2, 3) for j in range(-1, 2)
]
TV_STAND_ROT_90_POINTS = [
(0.25 * i, 0.25 * j) for i in range(-1, 2) for j in range(-2, 3)
]
TV_STAND_ROT_180_POINTS = TV_STAND_ROT_0_POINTS
TV_STAND_ROT_270_POINTS = TV_STAND_ROT_90_POINTS
TV_STAND_ROTATION_TO_OCCUPATIONS = {
0: TV_STAND_ROT_0_POINTS,
90: TV_STAND_ROT_90_POINTS,
180: TV_STAND_ROT_180_POINTS,
270: TV_STAND_ROT_270_POINTS,
}
DRESSER_SILHOUETTE_STRING = """1 1 1 1 1
1 1 1 1 1
1 1 1 1 1
"""
SPLIT_TO_USE_FOR_EVALUATION = "test"
HOST_NAME = socket.gethostname()
USERNAME = getpass.getuser()
PLATFORM = platform.system()
PROJECT_TOP_DIR = os.path.dirname(Path(__file__))
if PLATFORM == "Linux":
ABS_PATH_TO_LOCAL_THOR_BUILD = os.path.join(
PROJECT_TOP_DIR, "ai2thor_builds/thor-2-3-2020-Linux64"
)
elif PLATFORM == "Darwin":
ABS_PATH_TO_LOCAL_THOR_BUILD = os.path.join(
PROJECT_TOP_DIR,
"ai2thor_builds/thor-2-3-2020-OSXIntel64.app/Contents/MacOS/thor-2-3-2020-OSXIntel64",
)
else:
raise NotImplementedError(
(
"Your platform, {}, is not currently supported. "
"Supported platforms include Linux and MacOSX."
).format(PLATFORM)
)
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS = os.path.join(
PROJECT_TOP_DIR, "trained_models", "final_furnmove_ckpts"
)
ABS_PATH_TO_FINAL_FURNLIFT_CKPTS = os.path.join(
PROJECT_TOP_DIR, "trained_models", "final_furnlift_ckpts"
)
ABS_PATH_TO_ANALYSIS_RESULTS_DIR = os.path.join(PROJECT_TOP_DIR, "analysis_output")
ABS_PATH_TO_DATA_DIR = os.path.join(PROJECT_TOP_DIR, "data")
| cordial-sync-master | constants.py |
"""Defines the tasks that an agent should complete in a given environment."""
from abc import abstractmethod
from typing import (
Dict,
Any,
Tuple,
TypeVar,
Generic,
List,
Optional,
Callable,
)
EnvType = TypeVar("EnvType")
class Episode(Generic[EnvType]):
def __init__(
self, env: EnvType, task_data: Dict[str, Any], max_steps: int, **kwargs
) -> None:
self._env = env
self.task_data = task_data
self._num_steps_taken_in_episode = 0
self.max_steps = max_steps
@property
def environment(self) -> EnvType:
return self._env
@abstractmethod
def state_for_agent(self):
raise NotImplementedError()
def step(self, action_as_int: int) -> Dict[str, Any]:
assert not self.is_paused() and not self.is_complete()
self._increment_num_steps_taken_in_episode()
return self._step(action_as_int=action_as_int)
@abstractmethod
def _step(self, action_as_int: int) -> Dict[str, Any]:
raise NotImplementedError()
def reached_max_steps(self) -> bool:
return self.num_steps_taken_in_episode() >= self.max_steps
@abstractmethod
def reached_terminal_state(self) -> bool:
raise NotImplementedError()
def is_complete(self) -> bool:
return self.reached_terminal_state() or self.reached_max_steps()
def is_paused(self) -> bool:
return False
def _increment_num_steps_taken_in_episode(self) -> None:
self._num_steps_taken_in_episode += 1
def num_steps_taken_in_episode(self) -> int:
return self._num_steps_taken_in_episode
def info(self):
return {"ep_length": self.num_steps_taken_in_episode()}
@property
def available_actions(self) -> Tuple[str, ...]:
return tuple(a for g in self.available_action_groups for a in g)
@property
def available_action_groups(self) -> Tuple[Tuple[str, ...], ...]:
return self.class_available_action_groups()
@classmethod
def class_available_actions(cls, **kwargs) -> Tuple[str, ...]:
return tuple(a for g in cls.class_available_action_groups(**kwargs) for a in g)
@classmethod
@abstractmethod
def class_available_action_groups(cls, **kwargs) -> Tuple[Tuple[str, ...], ...]:
raise NotImplementedError()
@property
def total_available_actions(self) -> int:
return len(self.available_actions)
def index_to_action(self, index: int) -> str:
assert 0 <= index < self.total_available_actions
return self.available_actions[index]
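class _ExampleCountdownEpisode(Episode[Dict[str, int]]):
    """Illustrative sketch (an assumption, not part of the original file): a minimal
    concrete Episode over a toy dict environment, showing which abstract members a
    task must implement. Usage sketch:
    _ExampleCountdownEpisode(env={"remaining": 2}, task_data={}, max_steps=5).
    """

    @classmethod
    def class_available_action_groups(cls, **kwargs) -> Tuple[Tuple[str, ...], ...]:
        return (("Decrement", "Pass"),)

    def state_for_agent(self) -> Dict[str, Any]:
        return {"remaining": self.environment["remaining"]}

    def _step(self, action_as_int: int) -> Dict[str, Any]:
        # Map the integer action back to its name and update the toy environment.
        if self.index_to_action(action_as_int) == "Decrement":
            self.environment["remaining"] -= 1
        return {"reward": 0.0, "remaining": self.environment["remaining"]}

    def reached_terminal_state(self) -> bool:
        return self.environment["remaining"] <= 0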
class MultiAgentEpisode(Generic[EnvType]):
def __init__(
self,
env: EnvType,
task_data: Dict[str, Any],
max_steps: int,
before_step_function: Optional[Callable] = None,
after_step_function: Optional[Callable] = None,
**kwargs,
) -> None:
self._env = env
self.task_data = task_data
self._num_steps_taken_in_episode = 0
self.max_steps = max_steps
self.before_step_function = before_step_function
self.after_step_function = after_step_function
@property
def environment(self) -> EnvType:
return self._env
@abstractmethod
def states_for_agents(self):
raise NotImplementedError()
def multi_step(self, actions_as_ints: Tuple[int, ...]) -> List[Dict[str, Any]]:
assert not self.is_paused() and not self.is_complete()
step_results = []
before_info = (
None
if self.before_step_function is None
else self.before_step_function(episode=self)
)
for i in range(self._env.num_agents):
self._increment_num_steps_taken_in_episode()
step_results.append(self._step(actions_as_ints[i], agent_id=i))
if self.after_step_function is not None:
self.after_step_function(
step_results=step_results, before_info=before_info, episode=self
)
return step_results
@abstractmethod
def _step(self, action_as_int: int, agent_id: int) -> Dict[str, Any]:
raise NotImplementedError()
def reached_max_steps(self) -> bool:
return self.num_steps_taken_in_episode() >= self.max_steps
@abstractmethod
def reached_terminal_state(self) -> bool:
raise NotImplementedError()
def is_complete(self) -> bool:
return self.reached_terminal_state() or self.reached_max_steps()
def is_paused(self) -> bool:
return False
def _increment_num_steps_taken_in_episode(self) -> None:
self._num_steps_taken_in_episode += 1
def num_steps_taken_in_episode(self) -> int:
return self._num_steps_taken_in_episode
def info(self):
return {"ep_length": self.num_steps_taken_in_episode()}
@property
def available_actions(self) -> Tuple[str, ...]:
return tuple(a for g in self.available_action_groups for a in g)
@property
def available_action_groups(self) -> Tuple[Tuple[str, ...], ...]:
return self.class_available_action_groups()
@classmethod
def class_available_actions(cls, **kwargs) -> Tuple[str, ...]:
return tuple(a for g in cls.class_available_action_groups(**kwargs) for a in g)
@classmethod
@abstractmethod
def class_available_action_groups(cls, **kwargs) -> Tuple[Tuple[str, ...], ...]:
raise NotImplementedError()
@property
def total_available_actions(self) -> int:
return len(self.available_actions)
def index_to_action(self, index: int) -> str:
assert 0 <= index < self.total_available_actions
return self.available_actions[index]
| cordial-sync-master | rl_base/episode.py |
from .agent import A3CAgent
from .episode import Episode
from .shared_optim import SharedAdam, SharedRMSprop
| cordial-sync-master | rl_base/__init__.py |
from __future__ import division
import copy
from abc import ABC
from typing import Dict, List, Union, Optional, Any, TypeVar, Generic
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import net_util
from utils.misc_util import huber_loss
from .episode import Episode
EnvType = TypeVar("EnvType")
class RLAgent(Generic[EnvType]):
def __init__(
self,
model: Optional[nn.Module] = None,
episode: Optional[Episode[EnvType]] = None,
gpu_id: int = -1,
**kwargs,
) -> None:
# Important that gpu_id is set first as the model setter requires it
self.gpu_id = gpu_id
self._model: Optional[nn.Module] = None
if model is not None:
self.model = model
self._episode = episode
self.hidden: Optional[Any] = None
self.global_episode_count = 0
def sync_with_shared(self, shared_model: nn.Module) -> None:
if self.gpu_id >= 0:
with torch.cuda.device(self.gpu_id):
self.model.load_state_dict(shared_model.state_dict())
else:
self.model.load_state_dict(shared_model.state_dict())
def eval_at_state(self, state, hidden, **kwargs):
raise NotImplementedError()
def eval_at_current_state(self):
return self.eval_at_state(state=self.state, hidden=self.hidden)
@property
def episode(self) -> Optional[Episode[EnvType]]:
return self._episode
@episode.setter
def episode(self, value: Episode[EnvType]) -> None:
self.reset_hidden()
self.clear_history()
self._episode = value
@property
def environment(self) -> Optional[EnvType]:
if self.episode is None:
return None
return self.episode.environment
@property
def state(self):
raise NotImplementedError()
@property
def model(self) -> nn.Module:
assert self._model is not None
return self._model
@model.setter
def model(self, model_to_set: nn.Module):
if self.gpu_id >= 0:
with torch.cuda.device(self.gpu_id):
self._model = model_to_set.cuda()
else:
self._model = model_to_set
def gpuify(self, tensor: torch.Tensor) -> Union[torch.Tensor]:
if self.gpu_id >= 0:
with torch.cuda.device(self.gpu_id):
tensor = tensor.cuda()
return tensor
def act(self, **kwargs) -> Dict:
raise NotImplementedError()
def reset_hidden(self) -> None:
self.hidden = None
def repackage_hidden(self) -> None:
self.hidden = net_util.recursively_detach(self.hidden)
def preprocess_action(
self, last_action: Optional[int]
) -> Optional[Union[torch.LongTensor, torch.cuda.LongTensor]]:
if last_action is not None:
# noinspection PyTypeChecker
return self.gpuify(torch.LongTensor([[last_action]]))
return None
def preprocess_frame(
self, frame: np.ndarray, new_frame_size: int
) -> Union[torch.FloatTensor, torch.cuda.FloatTensor]:
frame = net_util.resnet_input_transform(frame, new_frame_size)
# noinspection PyArgumentList
return self.gpuify(torch.FloatTensor(frame))
def preprocess_long_tensor_frame(
self, frame: np.ndarray
) -> Union[torch.LongTensor, torch.cuda.LongTensor]:
agent_local_tensor = torch.LongTensor(frame)
if len(agent_local_tensor.shape) == 2:
agent_local_tensor = agent_local_tensor.unsqueeze(0)
return self.gpuify(agent_local_tensor)
def preprocess_depth_frame(
self, depth_frame: np.ndarray, new_frame_size: int
) -> Union[torch.FloatTensor, torch.cuda.FloatTensor]:
depth_frame = net_util.resize_image(
torch.FloatTensor(np.expand_dims(depth_frame / 5000.0, axis=0)),
new_frame_size,
)
return self.gpuify(depth_frame)
def clear_history(self) -> None:
raise NotImplementedError()
def clear_graph_data(self):
raise NotImplementedError()
def loss(self, **kwargs) -> Dict[str, torch.FloatTensor]:
raise NotImplementedError()
class A3CAgent(RLAgent[EnvType], ABC):
def __init__(
self,
model: Optional[nn.Module] = None,
episode: Optional[Episode[EnvType]] = None,
gamma: float = 0.99,
tau: float = 1.0,
beta: float = 1e-2,
gpu_id: int = -1,
include_test_eval_results: bool = False,
huber_delta: Optional[float] = None,
**kwargs,
) -> None:
super().__init__(model=model, episode=episode, gpu_id=gpu_id, **kwargs)
self.gamma = gamma
self.tau = tau
self.beta = beta
self.include_test_eval_results = include_test_eval_results
self.huber_delta = huber_delta
self.last_reward: Optional[float] = None
self.values: List[torch.FloatTensor] = []
self.log_prob_of_actions: List[torch.FloatTensor] = []
self.rewards: List[float] = []
self.entropies: List[torch.FloatTensor] = []
self.actions: List[int] = []
self.step_results: List[Dict[str, Any]] = []
self.eval_results: List[Dict[str, Any]] = []
self._a3c_loss_disabled_for_episode: bool = False
def eval_at_state(self, state, hidden, **kwargs):
raise NotImplementedError()
def disable_a3c_loss_for_episode(self, mode: bool = True):
self._a3c_loss_disabled_for_episode = mode
@property
def episode(self):
return super(A3CAgent, self).episode
@episode.setter
def episode(self, value: Episode[EnvType]) -> None:
# Properties cannot be assigned through super(); invoke the parent class's setter directly.
RLAgent.episode.fset(self, value)
self.disable_a3c_loss_for_episode(False)
def act(
self,
train: bool = True,
action: Optional[int] = None,
sample_action: bool = True,
**kwargs,
) -> Dict:
if not self.model.training and train:
self.model.train()
if self.model.training and not train:
self.model.eval()
assert self.episode is not None
assert not self.episode.is_complete()
if not train and self.hidden is not None:
self.repackage_hidden()
eval = self.eval_at_current_state()
value = eval["value"]
self.hidden = None if "hidden" not in eval else eval["hidden"]
logit = eval["logit"]
probs = F.softmax(logit, dim=1)
log_probs = F.log_softmax(logit, dim=1)
if hasattr(self.model, "action_groups"):
entropy_list = []
k = 0
marginal_probs = []
for action_group in self.model.action_groups:
marginal_probs.append(
probs[:, k : (k + len(action_group))].sum(1).unsqueeze(1)
)
group_probs = F.softmax(logit[:, k : (k + len(action_group))], dim=1)
group_log_probs = F.log_softmax(
logit[:, k : (k + len(action_group))], dim=1
)
entropy_list.append(-(group_log_probs * group_probs).sum(1))
k += len(action_group)
entropy = sum(entropy_list) / len(entropy_list)
marginal_probs = torch.cat(tuple(marginal_probs), dim=1)
entropy += -(marginal_probs * torch.log(marginal_probs)).sum(1)
else:
entropy = -(log_probs * probs).sum().unsqueeze(0)
if action is None:
if sample_action:
action = probs.multinomial(num_samples=1).item()
else:
action = probs.argmax(dim=-1, keepdim=True).item()
assert log_probs.shape[0] == 1
log_prob_of_action = log_probs.view(-1)[action]
before_location = self.environment.get_agent_location()
step_result = self.episode.step(action)
after_location = self.environment.get_agent_location()
self.last_reward = (
None if step_result["reward"] is None else float(step_result["reward"])
)
step_result["before_location"] = before_location
step_result["after_location"] = after_location
eval["probs"] = probs
eval["action"] = action
eval["step_result"] = step_result
eval["training"] = train
self.step_results.append(step_result)
self.actions.append(action)
self.rewards.append(self.last_reward)
if train or self.include_test_eval_results:
self.eval_results.append(eval)
if train:
self.entropies.append(entropy)
self.values.append(value)
self.log_prob_of_actions.append(log_prob_of_action)
return eval
def clear_history(self) -> None:
self.clear_graph_data()
self.rewards = []
self.actions = []
self.step_results = []
def clear_graph_data(self):
self.values = []
self.log_prob_of_actions = []
self.entropies = []
self.eval_results = []
def loss(self, **kwargs) -> Dict[str, torch.FloatTensor]:
assert self.episode is not None
if self._a3c_loss_disabled_for_episode:
return {}
if self.episode.is_complete():
future_reward_est = 0.0
else:
eval = self.eval_at_current_state()
future_reward_est = eval["value"].item()
return self.a3c_loss(
values=self.values,
rewards=self.rewards,
log_prob_of_actions=self.log_prob_of_actions,
entropies=self.entropies,
future_reward_est=future_reward_est,
gamma=self.gamma,
tau=self.tau,
beta=self.beta,
gpu_id=self.gpu_id,
)
@staticmethod
def a3c_loss(
values: List[torch.FloatTensor],
rewards: List[float],
log_prob_of_actions: List[torch.FloatTensor],
entropies: List[torch.FloatTensor],
future_reward_est: float,
gamma: float,
tau: float,
beta: float,
gpu_id: int,
huber_delta: Optional[float] = None,
):
R = torch.FloatTensor([[future_reward_est]])
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
R = R.cuda()
values = copy.copy(values)
values.append(R)
policy_loss = 0
value_loss = 0
entropy_loss = 0
gae = torch.zeros(1, 1)
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
gae = gae.cuda()
for i in reversed(range(len(rewards))):
R = gamma * R + rewards[i]
advantage = R - values[i]
value_loss = value_loss + (
0.5 * advantage.pow(2)
if huber_delta is None
else 0.5 * huber_loss(advantage, huber_delta)
)
# Generalized Advantage Estimation
delta_t = rewards[i] + gamma * values[i + 1].detach() - values[i].detach()
gae = gae * gamma * tau + delta_t
policy_loss = policy_loss - log_prob_of_actions[i] * gae
entropy_loss -= beta * entropies[i]
return {
"policy": policy_loss,
"value": 0.5 * value_loss,
"entropy": entropy_loss,
}
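def _example_a3c_loss_usage():
    """Illustrative sketch (an assumption, not part of the original repo): the
    a3c_loss computation on a toy two-step CPU rollout. With gamma=0.99 and
    future_reward_est=0.0, the discounted returns are R_1 = 1.0 and
    R_0 = 0.99 * 1.0 + 0.0 = 0.99."""
    return A3CAgent.a3c_loss(
        values=[torch.FloatTensor([[0.5]]), torch.FloatTensor([[0.4]])],
        rewards=[0.0, 1.0],
        log_prob_of_actions=[torch.FloatTensor([-0.7]), torch.FloatTensor([-0.2])],
        entropies=[torch.FloatTensor([1.0]), torch.FloatTensor([0.9])],
        future_reward_est=0.0,
        gamma=0.99,
        tau=1.0,
        beta=1e-2,
        gpu_id=-1,
    )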
| cordial-sync-master | rl_base/agent.py |
from __future__ import division
import math
from collections import defaultdict
from typing import Tuple, Dict, Union, Optional, Iterable, Callable
import torch
import torch.optim as optim
from torch import nn
class SharedRMSprop(optim.Optimizer):
"""Implements RMSprop algorithm with shared states.
"""
def __init__(
self,
params: Union[Iterable[nn.Parameter], Dict],
lr: float = 7e-4,
alpha: float = 0.99,
eps: float = 0.1,
weight_decay: float = 0,
momentum: float = 0,
centered: bool = False,
saved_state: Dict = None,
) -> None:
defaults: defaultdict = defaultdict(
lr=lr,
alpha=alpha,
eps=eps,
weight_decay=weight_decay,
momentum=momentum,
centered=centered,
)
super(SharedRMSprop, self).__init__(params, defaults)
if saved_state:
self.load_state_dict(saved_state)
for group in self.param_groups:
for p in group["params"]:
if p.requires_grad:
state = self.state[p]
state["step"] = torch.zeros(1)
state["grad_avg"] = p.data.new().resize_as_(p.data).zero_()
state["square_avg"] = p.data.new().resize_as_(p.data).zero_()
state["momentum_buffer"] = p.data.new().resize_as_(p.data).zero_()
def share_memory(self) -> None:
for group in self.param_groups:
for p in group["params"]:
state = self.state[p]
state["square_avg"].share_memory_()
state["step"].share_memory_()
state["grad_avg"].share_memory_()
state["momentum_buffer"].share_memory_()
def step(self, closure: Optional[Callable] = None) -> Optional[torch.FloatTensor]:
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("RMSprop does not support sparse gradients")
state = self.state[p]
square_avg = state["square_avg"]
alpha = group["alpha"]
state["step"] += 1
if group["weight_decay"] != 0:
grad = grad.add(group["weight_decay"], p.data)
square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)
if group["centered"]:
grad_avg = state["grad_avg"]
grad_avg.mul_(alpha).add_(1 - alpha, grad)
avg = (
square_avg.addcmul(-1, grad_avg, grad_avg)
.sqrt()
.add_(group["eps"])
)
else:
avg = square_avg.sqrt().add_(group["eps"])
if group["momentum"] > 0:
buf = state["momentum_buffer"]
buf.mul_(group["momentum"]).addcdiv_(grad, avg)
p.data.add_(-group["lr"], buf)
else:
p.data.addcdiv_(-group["lr"], grad, avg)
return loss
class SharedAdam(optim.Optimizer):
"""Implements Adam algorithm with shared states.
"""
def __init__(
self,
params: Union[Iterable[nn.Parameter], Dict],
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-3,
weight_decay: float = 0,
amsgrad: bool = False,
saved_state: Dict = None,
) -> None:
defaults: defaultdict = defaultdict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad
)
super(SharedAdam, self).__init__(params, defaults)
if saved_state:
self.load_state_dict(saved_state)
for group in self.param_groups:
for p in group["params"]:
if p.requires_grad:
state = self.state[p]
state["step"] = torch.zeros(1)
state["exp_avg"] = p.data.new().resize_as_(p.data).zero_()
state["exp_avg_sq"] = p.data.new().resize_as_(p.data).zero_()
state["max_exp_avg_sq"] = p.data.new().resize_as_(p.data).zero_()
def share_memory(self) -> None:
for group in self.param_groups:
for p in group["params"]:
state = self.state[p]
state["step"].share_memory_()
state["exp_avg"].share_memory_()
state["exp_avg_sq"].share_memory_()
state["max_exp_avg_sq"].share_memory_()
def step(self, closure: Optional[Callable] = None) -> Optional[torch.FloatTensor]:
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group["amsgrad"]
state = self.state[p]
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
if group["weight_decay"] != 0:
grad = grad.add(group["weight_decay"], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till
# now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group["eps"])
else:
denom = exp_avg_sq.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"][0]
bias_correction2 = 1 - beta2 ** state["step"][0]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
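def _example_shared_optimizer_usage(shared_model: nn.Module) -> SharedAdam:
    """Illustrative sketch (an assumption, not part of the original training code):
    the usual A3C pattern of placing both the model parameters and the optimizer
    state in shared memory before forking worker processes, so that every worker
    updates the same running statistics."""
    shared_model.share_memory()
    optimizer = SharedAdam(shared_model.parameters(), lr=1e-3, amsgrad=True)
    optimizer.share_memory()
    return optimizer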
| cordial-sync-master | rl_base/shared_optim.py |
import queue
import random
import warnings
from collections import Counter
from typing import List, Callable, Optional, Dict
import torch
from torch import multiprocessing as mp
from rl_ai2thor.ai2thor_environment import AI2ThorEnvironment
from rl_ai2thor.ai2thor_gridworld_environment import AI2ThorLiftedObjectGridEnvironment
from rl_base.agent import RLAgent
from rl_multi_agent import MultiAgent
from rl_multi_agent.furnlift_episode_samplers import save_talk_reply_data_frame
from rl_multi_agent.furnmove_episodes import FurnMoveEpisode
from rl_multi_agent.multi_agent_utils import TrainingCompleteException
class FurnMoveEpisodeSampler(object):
def __init__(
self,
scenes: List[str],
num_agents: int,
object_type: str,
to_object_type: str,
env_args=None,
max_episode_length: int = 500,
episode_class: Callable = FurnMoveEpisode,
player_screen_height=224,
player_screen_width=224,
save_talk_reply_probs_path: Optional[str] = None,
max_ep_using_expert_actions: int = 10000,
visible_agents: bool = True,
include_depth_frame: bool = False,
object_initial_height: float = 1.0,
max_distance_from_object: float = 0.5,
headless: bool = False,
episode_args: Optional[Dict] = None,
environment_args: Optional[Dict] = None,
):
self.visible_agents = visible_agents
self.max_ep_using_expert_actions = max_ep_using_expert_actions
self.save_talk_reply_probs_path = save_talk_reply_probs_path
self.player_screen_height = player_screen_height
self.player_screen_width = player_screen_width
self.episode_class = episode_class
self.max_episode_length = max_episode_length
self.env_args = env_args
self.num_agents = num_agents
self.scenes = scenes
self.object_type = object_type
self.to_object_type = to_object_type
self.include_depth_frame = include_depth_frame
self.object_initial_height = object_initial_height
self.max_distance_from_object = max_distance_from_object
self.headless = headless
self.episode_args = episode_args if episode_args is not None else {}
self.environment_args = environment_args if environment_args is not None else {}
self.grid_size = 0.25
self._current_train_episode = 0
self._internal_episodes = 0
self.episode_init_data = None
@staticmethod
def _create_environment(
num_agents,
env_args,
visible_agents: bool,
render_depth_image: bool,
headless: bool,
**environment_args,
) -> AI2ThorEnvironment:
env = AI2ThorEnvironment(
docker_enabled=env_args.docker_enabled,
x_display=env_args.x_display,
local_thor_build=env_args.local_thor_build,
num_agents=num_agents,
restrict_to_initially_reachable_points=True,
visible_agents=visible_agents,
render_depth_image=render_depth_image,
time_scale=1.0,
headless=headless,
**environment_args,
)
return env
@property
def current_train_episode(self):
return self._current_train_episode
@current_train_episode.setter
def current_train_episode(self, value):
self._current_train_episode = value
def start_env(self, agent: RLAgent, env: Optional[AI2ThorEnvironment]):
if env is None:
if agent.environment is not None:
env = agent.environment
else:
env = self._create_environment(
num_agents=self.num_agents,
env_args=self.env_args,
visible_agents=self.visible_agents,
render_depth_image=self.include_depth_frame,
headless=self.headless,
**self.environment_args,
)
env.start(
"FloorPlan1_physics",
move_mag=self.grid_size,
quality="Very Low",
player_screen_height=self.player_screen_height,
player_screen_width=self.player_screen_width,
)
return env
def __call__(
self,
agent: MultiAgent,
agent_location_seed=None,
env: Optional[AI2ThorEnvironment] = None,
episode_init_queue: Optional[mp.Queue] = None,
) -> None:
self._internal_episodes += 1
if (
self.max_ep_using_expert_actions != 0
and self.current_train_episode <= self.max_ep_using_expert_actions
):
agent.take_expert_action_prob = 0.9 * (
1.0 - self.current_train_episode / self.max_ep_using_expert_actions
)
else:
agent.take_expert_action_prob = 0
env = self.start_env(agent=agent, env=env)
self.episode_init_data = None
if episode_init_queue is not None:
try:
self.episode_init_data = episode_init_queue.get(timeout=1)
except queue.Empty:
raise TrainingCompleteException("No more data in episode init queue.")
scene = self.episode_init_data["scene"]
env.reset(scene)
agent_locations = self.episode_init_data["agent_locations"]
for i in range(self.num_agents):
env.teleport_agent_to(
**agent_locations[i], standing=True, force_action=True, agent_id=i
)
# Place agents in appropriate locations
env.step(
{
"action": "DisableAllObjectsOfType",
"objectId": self.object_type,
"agentId": 0,
}
)
lifted_object_loc = self.episode_init_data["lifted_object_location"]
sr = env.step(
{
"action": "CreateObjectAtLocation",
"objectType": self.object_type,
"position": {
"x": lifted_object_loc["x"],
"y": lifted_object_loc["y"],
"z": lifted_object_loc["z"],
},
"rotation": {"x": 0, "y": lifted_object_loc["rotation"], "z": 0},
"forceAction": True,
"agentId": 0,
}
)
assert env.last_event.metadata["lastActionSuccess"]
objects_of_type = env.all_objects_with_properties(
{"objectType": self.object_type}, agent_id=0
)
if len(objects_of_type) != 1:
print("len(objects_of_type): {}".format(len(objects_of_type)))
raise (Exception("len(objects_of_type) != 1"))
object = objects_of_type[0]
object_id = object["objectId"]
# Create the object we should navigate to
to_object_loc = self.episode_init_data["to_object_location"]
env.step(
{
"action": "CreateObjectAtLocation",
"objectType": self.to_object_type,
"position": {
"x": to_object_loc["x"],
"y": to_object_loc["y"],
"z": to_object_loc["z"],
},
"rotation": {"x": 0, "y": to_object_loc["rotation"], "z": 0},
"forceAction": True,
"agentId": 0,
}
)
assert env.last_event.metadata["lastActionSuccess"]
objects_of_to_type = [
o["objectId"]
for o in env.all_objects_with_properties(
{"objectType": self.to_object_type}, agent_id=0
)
if len(o["objectId"].split("|")) == 2
]
assert len(objects_of_to_type) == 1
to_object_id = objects_of_to_type[0]
assert to_object_id is not None and len(to_object_id) != 0
else:
scene = random.choice(self.scenes)
env.reset(scene)
for i in range(self.num_agents):
env.teleport_agent_to(
**env.last_event.metadata["agent"]["position"],
rotation=env.last_event.metadata["agent"]["rotation"]["y"],
horizon=30.0,
standing=True,
force_action=True,
agent_id=i,
)
scene_successfully_setup = False
failure_reasons = []
for _ in range(10):
# Randomly generate agents looking at the TV
env.step(
{
"action": "DisableAllObjectsOfType",
"objectId": self.object_type,
"agentId": 0,
}
)
sr = env.step(
{
"action": "RandomlyCreateLiftedFurniture",
"objectType": self.object_type,
"y": self.object_initial_height,
"z": self.max_distance_from_object,
}
)
if (
env.last_event.metadata["lastAction"]
!= "RandomlyCreateLiftedFurniture"
or not env.last_event.metadata["lastActionSuccess"]
):
failure_reasons.append(
"Could not randomize location of {} in {}. Error message {}.".format(
self.object_type,
scene,
env.last_event.metadata["errorMessage"],
)
)
continue
# Refreshing reachable here should not be done as it means the
# space underneath the TV would be considered unreachable even though,
# once the TV moves, it is.
# env.refresh_initially_reachable()
objects_of_type = env.all_objects_with_properties(
{"objectType": self.object_type}, agent_id=0
)
if len(objects_of_type) != 1:
print("len(objects_of_type): {}".format(len(objects_of_type)))
raise (Exception("len(objects_of_type) != 1"))
object = objects_of_type[0]
object_id = object["objectId"]
assert object_id == sr.metadata["actionReturn"]
# Create the object we should navigate to
env.step(
{
"action": "RandomlyCreateAndPlaceObjectOnFloor",
"objectType": self.to_object_type,
"agentId": 0,
}
)
if (
env.last_event.metadata["lastAction"]
!= "RandomlyCreateAndPlaceObjectOnFloor"
or not env.last_event.metadata["lastActionSuccess"]
):
failure_reasons.append(
"Could not randomize location of {} in {}. Error message: {}.".format(
self.to_object_type,
scene,
env.last_event.metadata["errorMessage"],
)
)
continue
to_object_id = env.last_event.metadata["actionReturn"]
assert to_object_id is not None and len(to_object_id) != 0
scene_successfully_setup = True
break
if not scene_successfully_setup:
# raise Exception(
warnings.warn(
(
"Failed to randomly initialize objects and agents in scene {} 10 times"
+ "for the following reasons:"
+ "\n\t* ".join(failure_reasons)
+ "\nTrying a new scene."
).format(env.scene_name)
)
yield from self(agent, env=env)
return
assert env.scene_name == scene
# Task data includes the target object id and one agent state/metadata to navigate to.
task_data = {"move_obj_id": object_id, "move_to_obj_id": to_object_id}
agent.episode = self.episode_class(
env,
task_data,
max_steps=self.max_episode_length,
num_agents=self.num_agents,
max_distance_from_object=self.max_distance_from_object,
include_depth_frame=self.include_depth_frame,
**self.episode_args,
)
yield True
if self.save_talk_reply_probs_path is not None:
if torch.cuda.is_available():
save_talk_reply_data_frame(
agent,
self.save_talk_reply_probs_path,
None,
use_hash=True, # self._internal_episodes,
)
else:
save_talk_reply_data_frame(
agent, self.save_talk_reply_probs_path, self._internal_episodes
)
class FurnMoveGridEpisodeSampler(object):
def __init__(
self,
scenes: List[str],
num_agents: int,
object_type: str,
to_object_type: str,
to_object_silhouette: str,
env_args=None,
max_episode_length: int = 500,
episode_class: Callable = FurnMoveEpisode,
player_screen_height=224,
player_screen_width=224,
save_talk_reply_probs_path: Optional[str] = None,
max_ep_using_expert_actions: int = 10000,
visible_agents: bool = True,
include_depth_frame: bool = False,
object_initial_height: float = 1.0,
max_distance_from_object: float = 0.5,
headless: bool = False,
episode_args: Optional[Dict] = None,
environment_args: Optional[Dict] = None,
):
self.visible_agents = visible_agents
self.max_ep_using_expert_actions = max_ep_using_expert_actions
self.save_talk_reply_probs_path = save_talk_reply_probs_path
self.player_screen_height = player_screen_height
self.player_screen_width = player_screen_width
self.episode_class = episode_class
self.max_episode_length = max_episode_length
self.env_args = env_args
self.num_agents = num_agents
self.scenes = scenes
self.object_type = object_type
self.to_object_type = to_object_type
self.include_depth_frame = include_depth_frame
self.object_initial_height = object_initial_height
self.max_distance_from_object = max_distance_from_object
self.headless = headless
self.episode_args = episode_args if episode_args is not None else {}
self.environment_args = environment_args if environment_args is not None else {}
self.to_object_silhouette = to_object_silhouette
self.grid_size = 0.25
self._current_train_episode = 0
self._internal_episodes = 0
self.episode_init_data = None
@staticmethod
def _create_environment(
num_agents,
env_args,
object_initial_height,
max_distance_from_object,
object_type,
visible_agents: bool,
render_depth_image: bool,
headless: bool,
**environment_args,
) -> AI2ThorLiftedObjectGridEnvironment:
# TODO: restrict_to_initially_reachable_points
# and render_depth_image are excluded from this.
env = AI2ThorLiftedObjectGridEnvironment(
docker_enabled=env_args.docker_enabled,
x_display=env_args.x_display,
local_thor_build=env_args.local_thor_build,
num_agents=num_agents,
visible_agents=visible_agents,
time_scale=1.0,
headless=headless,
lifted_object_height=object_initial_height,
max_dist_to_lifted_object=max_distance_from_object,
object_type=object_type,
**environment_args,
)
return env
@property
def current_train_episode(self):
return self._current_train_episode
@current_train_episode.setter
def current_train_episode(self, value):
self._current_train_episode = value
def start_env(self, agent: RLAgent, env: Optional[AI2ThorEnvironment]):
if agent.environment is not None:
env = agent.environment
else:
env = self._create_environment(
num_agents=self.num_agents,
env_args=self.env_args,
visible_agents=self.visible_agents,
render_depth_image=self.include_depth_frame,
headless=self.headless,
object_initial_height=self.object_initial_height,
max_distance_from_object=self.max_distance_from_object,
object_type=self.object_type,
**self.environment_args,
)
env.start(
"FloorPlan1_physics",
move_mag=self.grid_size,
quality="Very Low",
player_screen_height=self.player_screen_height,
player_screen_width=self.player_screen_width,
)
return env
def __call__(
self,
agent: MultiAgent,
agent_location_seed=None,
env: Optional[AI2ThorEnvironment] = None,
episode_init_queue: Optional[mp.Queue] = None,
) -> None:
self._internal_episodes += 1
if (
self.max_ep_using_expert_actions != 0
and self.current_train_episode <= self.max_ep_using_expert_actions
):
agent.take_expert_action_prob = 0.9 * (
1.0 - self.current_train_episode / self.max_ep_using_expert_actions
)
else:
agent.take_expert_action_prob = 0
env = self.start_env(agent=agent, env=env)
if episode_init_queue is not None:
try:
self.episode_init_data = episode_init_queue.get(timeout=1)
except queue.Empty:
raise TrainingCompleteException("No more data in episode init queue.")
scene = self.episode_init_data["scene"]
env.reset(scene)
agent_locations = self.episode_init_data["agent_locations"]
for i in range(self.num_agents):
loc = agent_locations[i]
env.step(
{
"action": "TeleportFull",
"agentId": i,
"x": loc["x"],
"z": loc["z"],
"rotation": {"y": loc["rotation"]},
"allowAgentIntersection": True,
"makeReachable": True,
}
)
assert env.last_event.metadata["lastActionSuccess"]
# Place agents in appropriate locations
lifted_object_loc = self.episode_init_data["lifted_object_location"]
env.step(
{
"action": "CreateLiftedFurnitureAtLocation",
"objectType": self.object_type,
"x": lifted_object_loc["x"],
"z": lifted_object_loc["z"],
"rotation": {"y": lifted_object_loc["rotation"]},
"agentId": 0,
"forceAction": True,
}
)
assert env.last_event.metadata["lastActionSuccess"]
object_id = env.last_event.metadata["actionReturn"]
# Create the object we should navigate to
to_object_loc = self.episode_init_data["to_object_location"]
env.step(
{
"action": "CreateAndPlaceObjectOnFloorAtLocation",
"objectType": "Dresser",
"object_mask": env.controller.parse_template_to_mask(
self.to_object_silhouette
),
**to_object_loc,
"rotation": {"y": to_object_loc["rotation"]},
"agentId": 0,
"forceAction": True,
}
)
assert env.last_event.metadata["lastActionSuccess"]
to_object_id = env.last_event.metadata["actionReturn"]["objectId"]
assert to_object_id is not None and len(to_object_id) != 0
else:
scene = random.choice(self.scenes)
env.reset(scene)
scene_successfully_setup = False
failure_reasons = []
for _ in range(10 if env.num_agents <= 3 else 50):
# Randomly generate agents looking at the TV
sr = env.step(
{
"action": "RandomlyCreateLiftedFurniture",
"agentId": 0,
"objectType": self.object_type,
"y": self.object_initial_height,
"z": self.max_distance_from_object,
}
)
if (
env.last_event.metadata["lastAction"]
!= "RandomlyCreateLiftedFurniture"
or not env.last_event.metadata["lastActionSuccess"]
):
failure_reasons.append(
"Could not randomize location of {} in {}.".format(
self.object_type, scene
)
)
continue
object_id = sr.metadata["actionReturn"]
# Create the object we should navigate to
env.step(
{
"action": "RandomlyCreateAndPlaceObjectOnFloor",
"objectType": self.to_object_type,
"object_mask": env.controller.parse_template_to_mask(
self.to_object_silhouette
),
"agentId": 0,
}
)
if (
env.last_event.metadata["lastAction"]
!= "RandomlyCreateAndPlaceObjectOnFloor"
or not env.last_event.metadata["lastActionSuccess"]
):
failure_reasons.append(
"Could not randomize location of {} in {}.".format(
self.to_object_type, scene
)
)
continue
to_object_id = env.last_event.metadata["actionReturn"]["objectId"]
assert to_object_id is not None and len(to_object_id) != 0
scene_successfully_setup = True
break
if not scene_successfully_setup:
# raise Exception(
warnings.warn(
(
"Failed to randomly initialize objects and agents in scene {} 10 times".format(
scene
)
+ "for the following reasons:"
+ "\n\t* ".join(
[
"({} times): {}".format(cnt, reason)
for reason, cnt in Counter(failure_reasons).items()
]
)
+ "\nTrying a new scene."
).format(env.scene_name)
)
yield from self(agent, env=env)
return
assert env.scene_name == scene
# Task data includes the target object id and one agent state/metadata to navigate to.
task_data = {"move_obj_id": object_id, "move_to_obj_id": to_object_id}
agent.episode = self.episode_class(
env,
task_data,
max_steps=self.max_episode_length,
num_agents=self.num_agents,
max_distance_from_object=self.max_distance_from_object,
include_depth_frame=self.include_depth_frame,
**self.episode_args,
)
yield True
if self.save_talk_reply_probs_path is not None:
if torch.cuda.is_available():
save_talk_reply_data_frame(
agent,
self.save_talk_reply_probs_path,
None,
use_hash=True, # self._internal_episodes,
)
else:
save_talk_reply_data_frame(
agent, self.save_talk_reply_probs_path, self._internal_episodes
)
| cordial-sync-master | rl_multi_agent/furnmove_episode_samplers.py |
from __future__ import division
from collections import OrderedDict
from typing import Tuple, Optional, Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.misc_util import norm_col_init, weights_init, outer_product, outer_sum
def _unfold_communications(speech_per_agent: torch.FloatTensor):
assert len(speech_per_agent.shape) >= 2
num_agents = speech_per_agent.shape[0]
unfolded_commns = [speech_per_agent[1:, :].view(1, -1)]
for i in range(1, num_agents - 1):
unfolded_commns.append(
torch.cat(
(
speech_per_agent[(i + 1) :,].view(1, -1),
speech_per_agent[:i,].view(1, -1),
),
dim=1,
)
)
unfolded_commns.append(speech_per_agent[:-1,].view(1, -1))
return torch.cat(unfolded_commns, dim=0)
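# Illustrative note (not in the original file): with 3 agents each emitting a 2-dim
# message, speech_per_agent has shape (3, 2) and the returned tensor has shape (3, 4);
# row i contains the other two agents' messages concatenated, starting with agent
# i + 1 and wrapping around.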
class A3CLSTMCentralEgoGridsEmbedCNN(nn.Module):
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
occupancy_embed_length: int,
):
super(A3CLSTMCentralEgoGridsEmbedCNN, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.num_inputs_per_agent = num_inputs_per_agent
self.num_inputs = num_inputs_per_agent * num_agents
self.num_agents = num_agents
self.occupancy_embed_length = occupancy_embed_length
self.occupancy_embeddings = nn.Embedding(
num_inputs_per_agent, self.occupancy_embed_length
)
# input to conv is (num_agents * self.occupancy_embed_length, 15, 15)
self.cnn = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
self.occupancy_embed_length * self.num_agents,
32,
3,
padding=1,
),
),
("relu1", nn.ReLU(inplace=True)),
# shape = (15, 15)
("conv2", nn.Conv2d(32, 64, 3, stride=2)),
("relu2", nn.ReLU(inplace=True)),
# shape = (7, 7)
("conv3", nn.Conv2d(64, 128, 3, stride=2)),
("relu3", nn.ReLU(inplace=True)),
# shape = (3, 3)
("conv4", nn.Conv2d(128, 256, 3, stride=2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (1, 1); Stride doesn't matter above
]
)
)
# LSTM
self.lstm_in_dim = 256
self.lstm = nn.LSTM(self.lstm_in_dim, state_repr_length, batch_first=True)
# Post LSTM fully connected layers
self.after_lstm_mlp = nn.Sequential(
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
)
# Marginal Linear actor
self.marginal_actor_linear_list = nn.ModuleList(
[
nn.Linear(state_repr_length, self.num_outputs)
for _ in range(self.num_agents)
]
)
# Conditional actor
self.joint_actor_linear = nn.Linear(
state_repr_length, self.num_outputs ** self.num_agents
)
# Linear critic
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
for agent_id in range(self.num_agents):
self.marginal_actor_linear_list[agent_id].weight.data = norm_col_init(
self.marginal_actor_linear_list[agent_id].weight.data, 0.01
)
self.marginal_actor_linear_list[agent_id].bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 15, 15):
raise Exception("input to model is not as expected, check!")
inputs = inputs.float()
# inputs.shape == (self.num_agents, self.num_inputs, 15, 15)
inputs = inputs.permute(0, 2, 3, 1).contiguous()
original_shape = inputs.shape
# original_shape == (self.num_agents, 15, 15, self.num_inputs)
scaling_factor = torch.reciprocal(
(inputs == 1).sum(dim=3).unsqueeze(3).float() + 1e-3
)
# scaling_factor.shape == (self.num_agents, 15, 15, 1)
inputs = inputs.view(-1, self.num_inputs_per_agent)
inputs = inputs.matmul(self.occupancy_embeddings.weight)
inputs = inputs.view(
original_shape[0],
original_shape[1],
original_shape[2],
self.occupancy_embed_length,
)
x = torch.mul(inputs, scaling_factor.expand_as(inputs))
# x.shape == (self.num_agents, 15, 15, self.occupancy_embed_length)
x = (
x.permute(0, 3, 1, 2)
.contiguous()
.view(
1,
original_shape[0] * self.occupancy_embed_length,
original_shape[1],
original_shape[2],
)
)
# x.shape == (1, self.num_agents * self.occupancy_embed_length, 15, 15)
x = self.cnn(x)
# x.shape == (1, 256, 1, 1)
x = x.view(1, -1)
# x.shape == (1, self.lstm_in_dim)
batch_dim = 1
# x.shape = [batch_dim, self.lstm_in_dim]
if x.shape != (1, self.lstm_in_dim):
print("x.shape: {}".format(x.shape))
raise Exception("output of model is not as expected, check!")
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [batch_dim, 1, state_repr_length]
x = x.squeeze(1)
# x.shape = [batch_dim, state_repr_length]
final_state = x + self.after_lstm_mlp(x)
# state_talk_reply_repr.shape = [batch=1, state_repr_length]
marginal_actor_linear_output_list = [
self.marginal_actor_linear_list[agent_id](final_state).view(-1)
for agent_id in range(self.num_agents)
]
# list with each element of size [batch_dim, self.num_outputs]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (final_state.unsqueeze(0), hidden[1])
joint_actor_logits = self.joint_actor_linear(final_state).view(
batch_dim, *((self.num_outputs,) * self.num_agents)
)
if self.num_agents <= 2:
marginal_logits = outer_product(
marginal_actor_linear_output_list
).unsqueeze(0)
else:
marginal_logits = outer_sum(marginal_actor_linear_output_list).unsqueeze(0)
assert marginal_logits.shape == joint_actor_logits.shape
combined_logits = joint_actor_logits + marginal_logits
# One length lists as this is a central agent
return {
"logit_all": combined_logits.view(
batch_dim, self.num_outputs ** self.num_agents
),
"value_all": self.critic_linear(final_state), # shape: [batch_dim,1]
"hidden_all": hidden,
"joint_logit_all": True,
}
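def _example_central_model_forward():
    """Illustrative sketch (an assumption, not part of the original file): build the
    central two-agent model and run one forward pass on a random multi-hot 15x15
    occupancy grid; the action names and sizes below are hypothetical."""
    num_agents, num_channels = 2, 9
    model = A3CLSTMCentralEgoGridsEmbedCNN(
        num_inputs_per_agent=num_channels,
        action_groups=(("MoveAhead", "RotateLeft", "RotateRight", "Pass"),),
        num_agents=num_agents,
        state_repr_length=128,
        occupancy_embed_length=8,
    )
    grids = (torch.rand(num_agents, num_channels, 15, 15) > 0.5).float()
    out = model(inputs=grids, hidden=None, agent_rotations=[0, 90])
    # out["logit_all"] has shape (1, 4 ** num_agents); out["value_all"] has shape (1, 1).
    return out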
class A3CLSTMBigCentralEgoGridsEmbedCNN(nn.Module):
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
occupancy_embed_length: int,
):
super(A3CLSTMBigCentralEgoGridsEmbedCNN, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.num_inputs_per_agent = num_inputs_per_agent
self.num_inputs = num_inputs_per_agent * num_agents
self.num_agents = num_agents
self.occupancy_embed_length = occupancy_embed_length
self.occupancy_embeddings = nn.Embedding(
num_inputs_per_agent, self.occupancy_embed_length
)
# input to conv is (self.occupancy_embed_length, 15, 15)
self.cnn = nn.Sequential(
OrderedDict(
[
("conv1", nn.Conv2d(self.occupancy_embed_length, 32, 3, padding=1)),
("relu1", nn.ReLU(inplace=True)),
# shape = (15, 15)
("conv2", nn.Conv2d(32, 64, 3, stride=2)),
("relu2", nn.ReLU(inplace=True)),
# shape = (7, 7)
("conv3", nn.Conv2d(64, 128, 3, stride=2)),
("relu3", nn.ReLU(inplace=True)),
# shape = (3, 3)
("conv4", nn.Conv2d(128, 256, 3, stride=2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (1, 1); Stride doesn't matter above
]
)
)
self.lstm_in_dim = 256
self.before_lstm_mlp = nn.Sequential(
nn.Linear(
self.num_agents * self.lstm_in_dim, self.num_agents * self.lstm_in_dim
),
nn.ReLU(inplace=True),
nn.Linear(self.num_agents * self.lstm_in_dim, self.lstm_in_dim),
nn.ReLU(inplace=True),
)
# LSTM
self.lstm = nn.LSTM(self.lstm_in_dim, state_repr_length, batch_first=True)
# Post LSTM fully connected layers
self.after_lstm_mlp = nn.Sequential(
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
)
# Marginal Linear actor
self.marginal_actor_linear_list = nn.ModuleList(
[
nn.Linear(state_repr_length, self.num_outputs)
for _ in range(self.num_agents)
]
)
# Conditional actor
self.joint_actor_linear = nn.Linear(
state_repr_length, self.num_outputs ** self.num_agents
)
# Linear critic
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
for agent_id in range(self.num_agents):
self.marginal_actor_linear_list[agent_id].weight.data = norm_col_init(
self.marginal_actor_linear_list[agent_id].weight.data, 0.01
)
self.marginal_actor_linear_list[agent_id].bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 15, 15):
raise Exception("input to model is not as expected, check!")
inputs = inputs.float()
# inputs.shape == (self.num_agents, self.num_inputs, 15, 15)
inputs = inputs.permute(0, 2, 3, 1).contiguous()
original_shape = inputs.shape
# original_shape == (self.num_agents, 15, 15, self.num_inputs)
scaling_factor = torch.reciprocal(
(inputs == 1).sum(dim=3).unsqueeze(3).float() + 1e-3
)
# scaling_factor.shape == (self.num_agents, 15, 15, 1)
inputs = inputs.view(-1, self.num_inputs_per_agent)
inputs = inputs.matmul(self.occupancy_embeddings.weight)
inputs = inputs.view(
original_shape[0],
original_shape[1],
original_shape[2],
self.occupancy_embed_length,
)
x = torch.mul(inputs, scaling_factor.expand_as(inputs))
# x.shape == (self.num_agents, 15, 15, self.occupancy_embed_length)
x = x.permute(0, 3, 1, 2).contiguous()
# x.shape == (self.num_agents, self.occupancy_embed_length, 15, 15)
x = self.cnn(x)
# x.shape == (self.num_agents, 256, 1, 1)
x = x.view(1, -1)
# x.shape == (1, self.num_agents * self.lstm_in_dim)
x = self.before_lstm_mlp(x)
batch_dim = 1
# x.shape = [batch_dim, self.lstm_in_dim]
if x.shape != (1, self.lstm_in_dim):
print("x.shape: {}".format(x.shape))
raise Exception("output of model is not as expected, check!")
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [batch_dim, 1, state_repr_length]
x = x.squeeze(1)
# x.shape = [batch_dim, state_repr_length]
final_state = x + self.after_lstm_mlp(x)
# state_talk_reply_repr.shape = [batch=1, state_repr_length]
marginal_actor_linear_output_list = [
self.marginal_actor_linear_list[agent_id](final_state).view(-1)
for agent_id in range(self.num_agents)
]
# list with each element of size [batch_dim, self.num_outputs]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (final_state.unsqueeze(0), hidden[1])
joint_actor_logits = self.joint_actor_linear(final_state).view(
batch_dim, *((self.num_outputs,) * self.num_agents)
)
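        # Below, the per-agent marginal logits are broadcast into the same
        # num_outputs ** num_agents shaped tensor and added to the joint head's logits,
        # so the joint policy acts as a correction on top of independent marginals.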
if self.num_agents <= 2:
marginal_logits = outer_product(
marginal_actor_linear_output_list
).unsqueeze(0)
else:
marginal_logits = outer_sum(marginal_actor_linear_output_list).unsqueeze(0)
assert marginal_logits.shape == joint_actor_logits.shape
combined_logits = joint_actor_logits + marginal_logits
        # Length-one lists as this is a central agent
return {
"logit_all": combined_logits.view(
batch_dim, self.num_outputs ** self.num_agents
),
"value_all": self.critic_linear(final_state), # shape: [batch_dim,1]
"hidden_all": hidden,
"joint_logit_all": True,
}
class A3CLSTMBigCentralBackboneAndCoordinatedActionsEgoGridsEmbedCNN(nn.Module):
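    # Centralized-backbone model for two agents observing 15x15 egocentric occupancy
    # grids: per-agent occupancy embeddings and a shared CNN are merged by an MLP into
    # a single LSTM state. Two separate actor heads produce either plain marginal
    # logits or, with coordinate_actions, num_outputs * coordinate_actions_dim
    # "coordinated" logits together with randomization logits (a mixture of marginals).
    # A single critic value is computed and duplicated for both agents.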
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
occupancy_embed_length: int,
coordinate_actions: bool,
coordinate_actions_dim: Optional[int] = None,
central_critic: bool = False,
):
super(
A3CLSTMBigCentralBackboneAndCoordinatedActionsEgoGridsEmbedCNN, self
).__init__()
        # Either coordinated actions (a mixture of marginals) are used, or, if only
        # single marginals are used, no coordinate_actions_dim should be provided.
assert coordinate_actions or not coordinate_actions_dim
# Since the representation for both agents is the same, having
# separate actor weights for mixture and also for single is
# necessary. Explicitly captured below.
separate_actor_weights = True
# Also since this is a single central agent, there is no central critic
# feature expected in this model
assert not central_critic
self.separate_actor_weights = separate_actor_weights
        self.num_outputs = sum(len(x) for x in action_groups)
        self.coordinate_actions_dim = (
            self.num_outputs
            if coordinate_actions and coordinate_actions_dim is None
            else coordinate_actions_dim
        )
self.num_inputs_per_agent = num_inputs_per_agent
self.num_inputs = num_inputs_per_agent * num_agents
self.num_agents = num_agents
self.occupancy_embed_length = occupancy_embed_length
self.occupancy_embeddings = nn.Embedding(
num_inputs_per_agent, self.occupancy_embed_length
)
self.coordinate_actions = coordinate_actions
# input to conv is (self.occupancy_embed_length, 15, 15)
self.cnn = nn.Sequential(
OrderedDict(
[
("conv1", nn.Conv2d(self.occupancy_embed_length, 32, 3, padding=1)),
("relu1", nn.ReLU(inplace=True)),
# shape = (15, 15)
("conv2", nn.Conv2d(32, 64, 3, stride=2)),
("relu2", nn.ReLU(inplace=True)),
# shape = (7, 7)
("conv3", nn.Conv2d(64, 128, 3, stride=2)),
("relu3", nn.ReLU(inplace=True)),
# shape = (3, 3)
("conv4", nn.Conv2d(128, 256, 3, stride=2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (1, 1); Stride doesn't matter above
]
)
)
self.lstm_in_dim = 256
self.before_lstm_mlp = nn.Sequential(
nn.Linear(2 * self.lstm_in_dim, 2 * self.lstm_in_dim),
nn.ReLU(inplace=True),
nn.Linear(2 * self.lstm_in_dim, self.lstm_in_dim),
nn.ReLU(inplace=True),
)
# LSTM
self.lstm = nn.LSTM(self.lstm_in_dim, state_repr_length, batch_first=True)
# Post LSTM fully connected layers
self.after_lstm_mlp = nn.Sequential(
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
)
if coordinate_actions:
# Randomization MLP operating on the central state representation
self.to_randomization_logits = nn.Sequential(
nn.Linear(1 * state_repr_length, 64),
nn.ReLU(inplace=True),
nn.Linear(64, 64),
nn.ReLU(inplace=True),
nn.Linear(64, self.coordinate_actions_dim),
)
# Linear actor
self.actor_linear = None
actor_linear_input_dim = state_repr_length
# self.pre_actor_list = nn.ModuleList(
# [
# nn.Sequential(
# nn.Linear(state_repr_length, actor_linear_input_dim),
# nn.ReLU(inplace=True),
# )
#
# for _ in range(2)
# ]
# )
if coordinate_actions:
self.actor_linear_list = nn.ModuleList(
[
nn.Linear(
actor_linear_input_dim,
self.num_outputs * self.coordinate_actions_dim,
)
for _ in range(2)
]
)
else:
self.actor_linear_list = nn.ModuleList(
[nn.Linear(actor_linear_input_dim, self.num_outputs) for _ in range(2)]
)
for al in self.actor_linear_list:
al.weight.data = norm_col_init(al.weight.data, 0.01)
al.bias.data.fill_(0)
# Linear critic
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 15, 15):
raise Exception("input to model is not as expected, check!")
inputs = inputs.float()
# inputs.shape == (2, self.num_inputs, 15, 15)
inputs = inputs.permute(0, 2, 3, 1).contiguous()
original_shape = inputs.shape
# original_shape == (2, 15, 15, self.num_inputs)
scaling_factor = torch.reciprocal(
(inputs == 1).sum(dim=3).unsqueeze(3).float() + 1e-3
)
# scaling_factor.shape == (2, 15, 15, 1)
inputs = inputs.view(-1, self.num_inputs_per_agent)
inputs = inputs.matmul(self.occupancy_embeddings.weight)
inputs = inputs.view(
original_shape[0],
original_shape[1],
original_shape[2],
self.occupancy_embed_length,
)
x = torch.mul(inputs, scaling_factor.expand_as(inputs))
# x.shape == (2, 15, 15, self.occupancy_embed_length)
x = x.permute(0, 3, 1, 2).contiguous()
# x.shape == (2, self.occupancy_embed_length, 15, 15)
x = self.cnn(x)
# x.shape == (2, 256, 1, 1)
x = x.view(1, -1)
# x.shape == (1, 2 * self.lstm_in_dim = 512)
x = self.before_lstm_mlp(x)
batch_dim = 1
# x.shape = [batch_dim, self.lstm_in_dim]
if x.shape != (1, self.lstm_in_dim):
print("x.shape: {}".format(x.shape))
raise Exception("output of model is not as expected, check!")
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [batch_dim, 1, state_repr_length]
x = x.squeeze(1)
# x.shape = [batch_dim, state_repr_length]
final_state = x + self.after_lstm_mlp(x)
        # final_state.shape = [batch=1, state_repr_length]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (final_state.unsqueeze(0), hidden[1])
# One central backbone agent, so no option for central critic, check asserts
value_all = self.critic_linear(final_state)
# Same value for both agents
to_return = {
"value_all": torch.cat([value_all, value_all], dim=0),
"hidden_all": hidden,
}
logits = torch.cat(
[
linear(
# self.pre_actor_list[i](final_state)
final_state
)
for i, linear in enumerate(self.actor_linear_list)
],
dim=0,
)
if self.coordinate_actions:
# Mixture marginals, actor output = num_outputs * coordinate_action_dim
to_return["randomization_logits"] = self.to_randomization_logits(
final_state
)
to_return["coordinated_logits"] = logits
else:
# Single marginal, actor output = num_outputs
to_return["logit_all"] = logits
return to_return
class A3CLSTMCentralEgoVision(nn.Module):
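    # Fully central agent for egocentric image observations: the agents' 84x84 frames
    # are stacked along the channel dimension and processed by a single CNN + LSTM.
    # The joint policy over num_outputs ** 2 actions is the sum of a joint actor head
    # and the outer product of the two marginal heads' logits; one critic scores the
    # central state.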
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
):
super(A3CLSTMCentralEgoVision, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.num_inputs_per_agent = num_inputs_per_agent
self.num_inputs = num_inputs_per_agent * num_agents
self.num_agents = num_agents
# input to conv is (num_agents * self.num_inputs_per_agent, 84, 84)
self.cnn = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
self.num_inputs_per_agent * self.num_agents,
16,
5,
stride=1,
padding=2,
),
),
("maxpool1", nn.MaxPool2d(2, 2)),
("relu1", nn.ReLU(inplace=True)),
# shape =
("conv2", nn.Conv2d(16, 16, 5, stride=1, padding=1)),
("maxpool2", nn.MaxPool2d(2, 2)),
("relu2", nn.ReLU(inplace=True)),
# shape =
("conv3", nn.Conv2d(16, 32, 4, stride=1, padding=1)),
("maxpool3", nn.MaxPool2d(2, 2)),
("relu3", nn.ReLU(inplace=True)),
# shape =
("conv4", nn.Conv2d(32, 64, 3, stride=1, padding=1)),
("maxpool4", nn.MaxPool2d(2, 2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (4, 4)
("conv5", nn.Conv2d(64, 128, 3, stride=1, padding=1)),
("maxpool5", nn.MaxPool2d(2, 2)),
("relu5", nn.ReLU(inplace=True)),
# shape = (2, 2)
]
)
)
# LSTM
self.lstm_in_dim = 512
self.lstm = nn.LSTM(self.lstm_in_dim, state_repr_length, batch_first=True)
# Post LSTM fully connected layers
self.after_lstm_mlp = nn.Sequential(
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
)
# Marginal Linear actor
self.marginal_actor_linear_list = nn.ModuleList(
[
nn.Linear(state_repr_length, self.num_outputs)
for _ in range(self.num_agents)
]
)
# Conditional actor
self.joint_actor_linear = nn.Linear(
state_repr_length, self.num_outputs * self.num_outputs
)
# Linear critic
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
for agent_id in range(self.num_agents):
self.marginal_actor_linear_list[agent_id].weight.data = norm_col_init(
self.marginal_actor_linear_list[agent_id].weight.data, 0.01
)
self.marginal_actor_linear_list[agent_id].bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 84, 84):
raise Exception("input to model is not as expected, check!")
inputs = inputs.view(
self.num_agents * self.num_inputs_per_agent, 84, 84
).unsqueeze(0)
# inputs.shape == (1, self.num_agents * self.num_inputs, 84, 84)
x = self.cnn(inputs)
        # x.shape == (1, 128, 2, 2)
x = x.view(1, -1)
# x.shape == (batch_dim, self.lstm_in_dim)
batch_dim = 1
# x.shape = [batch_dim, self.lstm_in_dim]
if x.shape != (1, self.lstm_in_dim):
print("x.shape: {}".format(x.shape))
raise Exception("output of model is not as expected, check!")
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [batch_dim, 1, state_repr_length]
x = x.squeeze(1)
# x.shape = [batch_dim, state_repr_length]
final_state = x + self.after_lstm_mlp(x)
        # final_state.shape = [batch=1, state_repr_length]
marginal_actor_linear_output_list = [
self.marginal_actor_linear_list[agent_id](final_state)
for agent_id in range(self.num_agents)
]
# list with each element of size [batch_dim, self.num_outputs]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (final_state.unsqueeze(0), hidden[1])
joint_actor_logits = self.joint_actor_linear(final_state).view(
batch_dim, self.num_outputs, self.num_outputs
)
outer_product_marginal_logits = marginal_actor_linear_output_list[0].unsqueeze(
2
) * marginal_actor_linear_output_list[1].unsqueeze(1)
assert outer_product_marginal_logits.shape == (
batch_dim,
self.num_outputs,
self.num_outputs,
)
combined_logits = joint_actor_logits + outer_product_marginal_logits
        # Length-one lists as this is a central agent
return {
"logit_all": combined_logits.view(
batch_dim, self.num_outputs * self.num_outputs
),
"value_all": self.critic_linear(final_state), # shape: [batch_dim,1]
"hidden_all": hidden,
"joint_logit_all": True,
}
class A3CLSTMBigCentralEgoVision(nn.Module):
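    # Central agent with a per-agent encoder: each agent's 84x84 observation is passed
    # through a shared CNN, the flattened features are concatenated and compressed by
    # an MLP before a single LSTM. Per-agent marginal heads are combined with a joint
    # head over num_outputs ** num_agents actions (outer_product for <= 2 agents,
    # outer_sum otherwise), and a single critic scores the central state.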
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
):
super(A3CLSTMBigCentralEgoVision, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.num_inputs_per_agent = num_inputs_per_agent
self.num_inputs = num_inputs_per_agent * num_agents
self.num_agents = num_agents
# input to conv is (self.num_inputs_per_agent, 84, 84)
self.cnn = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
self.num_inputs_per_agent, 16, 5, stride=1, padding=2
),
),
("maxpool1", nn.MaxPool2d(2, 2)),
("relu1", nn.ReLU(inplace=True)),
# shape =
("conv2", nn.Conv2d(16, 16, 5, stride=1, padding=1)),
("maxpool2", nn.MaxPool2d(2, 2)),
("relu2", nn.ReLU(inplace=True)),
# shape =
("conv3", nn.Conv2d(16, 32, 4, stride=1, padding=1)),
("maxpool3", nn.MaxPool2d(2, 2)),
("relu3", nn.ReLU(inplace=True)),
# shape =
("conv4", nn.Conv2d(32, 64, 3, stride=1, padding=1)),
("maxpool4", nn.MaxPool2d(2, 2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (4, 4)
("conv5", nn.Conv2d(64, 128, 3, stride=1, padding=1)),
("maxpool5", nn.MaxPool2d(2, 2)),
("relu5", nn.ReLU(inplace=True)),
# shape = (2, 2)
]
)
)
# LSTM
self.lstm_in_dim = 512
self.before_lstm_mlp = nn.Sequential(
nn.Linear(self.lstm_in_dim * self.num_agents, self.lstm_in_dim),
nn.ReLU(inplace=True),
)
self.lstm = nn.LSTM(self.lstm_in_dim, state_repr_length, batch_first=True)
# Post LSTM fully connected layers
self.after_lstm_mlp = nn.Sequential(
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
)
# Marginal Linear actor
self.marginal_actor_linear_list = nn.ModuleList(
[
nn.Linear(state_repr_length, self.num_outputs)
for _ in range(self.num_agents)
]
)
# Conditional actor
self.joint_actor_linear = nn.Linear(
state_repr_length, self.num_outputs ** self.num_agents
)
# Linear critic
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
for agent_id in range(self.num_agents):
self.marginal_actor_linear_list[agent_id].weight.data = norm_col_init(
self.marginal_actor_linear_list[agent_id].weight.data, 0.01
)
self.marginal_actor_linear_list[agent_id].bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 84, 84):
raise Exception("input to model is not as expected, check!")
        # inputs.shape == (self.num_agents, self.num_inputs_per_agent, 84, 84)
        x = self.cnn(inputs)
        # x.shape == (self.num_agents, 128, 2, 2)
x = x.view(1, -1)
# x.shape == (batch_dim, self.lstm_in_dim * self.num_agents)
x = self.before_lstm_mlp(x)
# x.shape == (batch_dim, self.lstm_in_dim)
batch_dim = 1
if x.shape != (1, self.lstm_in_dim):
print("x.shape: {}".format(x.shape))
raise Exception("output of model is not as expected, check!")
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [batch_dim, 1, state_repr_length]
x = x.squeeze(1)
# x.shape = [batch_dim, state_repr_length]
final_state = x + self.after_lstm_mlp(x)
        # final_state.shape = [batch=1, state_repr_length]
marginal_actor_linear_output_list = [
self.marginal_actor_linear_list[agent_id](final_state).view(-1)
for agent_id in range(self.num_agents)
]
# list with each element of size [batch_dim, self.num_outputs]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (final_state.unsqueeze(0), hidden[1])
joint_actor_logits = self.joint_actor_linear(final_state).view(
batch_dim, *((self.num_outputs,) * self.num_agents)
)
if self.num_agents <= 2:
marginal_logits = outer_product(
marginal_actor_linear_output_list
).unsqueeze(0)
else:
marginal_logits = outer_sum(marginal_actor_linear_output_list).unsqueeze(0)
assert marginal_logits.shape == joint_actor_logits.shape
assert marginal_logits.shape == (
batch_dim,
*((self.num_outputs,) * self.num_agents),
)
combined_logits = joint_actor_logits + marginal_logits
        # Length-one lists as this is a central agent
return {
"logit_all": combined_logits.view(
batch_dim, self.num_outputs ** self.num_agents
),
"value_all": self.critic_linear(final_state), # shape: [batch_dim,1]
"hidden_all": hidden,
"joint_logit_all": True,
}
class OldTaskA3CLSTMBigCentralEgoVision(nn.Module):
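    # "OldTask" variant of the central ego-vision model: a smaller CNN
    # (final_cnn_channels = 19, 4x4 spatial output) feeds a per-agent feature
    # concatenation + MLP + LSTM, and the two marginal heads are combined with the
    # joint head via an explicit outer product over num_outputs ** 2 actions.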
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
):
super(OldTaskA3CLSTMBigCentralEgoVision, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.num_inputs_per_agent = num_inputs_per_agent
self.num_inputs = num_inputs_per_agent * num_agents
self.num_agents = num_agents
final_cnn_channels = 19
# input to conv is (self.num_inputs_per_agent, 84, 84)
self.cnn = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
self.num_inputs_per_agent, 32, 5, stride=1, padding=2
),
),
("maxpool1", nn.MaxPool2d(2, 2)),
("relu1", nn.ReLU(inplace=True)),
# shape =
("conv2", nn.Conv2d(32, 32, 5, stride=1, padding=1)),
("maxpool2", nn.MaxPool2d(2, 2)),
("relu2", nn.ReLU(inplace=True)),
# shape =
("conv3", nn.Conv2d(32, 64, 4, stride=1, padding=1)),
("maxpool3", nn.MaxPool2d(2, 2)),
("relu3", nn.ReLU(inplace=True)),
# shape =
(
"conv4",
nn.Conv2d(64, final_cnn_channels, 3, stride=1, padding=1),
),
("maxpool4", nn.MaxPool2d(2, 2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (4, 4)
]
)
)
# CNN output:
self.cnn_output_dim = final_cnn_channels * 4 * 4
# LSTM
self.lstm_in_dim = 64 * 4 * 4
self.before_lstm_mlp = nn.Sequential(
nn.Linear(self.cnn_output_dim * 2, self.lstm_in_dim), nn.ReLU(inplace=True)
)
self.lstm = nn.LSTM(self.lstm_in_dim, state_repr_length, batch_first=True)
# Post LSTM fully connected layers
self.after_lstm_mlp = nn.Sequential(
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
nn.Linear(state_repr_length, state_repr_length),
nn.ReLU(inplace=True),
)
# Marginal Linear actor
self.marginal_actor_linear_list = nn.ModuleList(
[
nn.Linear(state_repr_length, self.num_outputs)
for _ in range(self.num_agents)
]
)
# Conditional actor
self.joint_actor_linear = nn.Linear(
state_repr_length, self.num_outputs * self.num_outputs
)
# Linear critic
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
for agent_id in range(self.num_agents):
self.marginal_actor_linear_list[agent_id].weight.data = norm_col_init(
self.marginal_actor_linear_list[agent_id].weight.data, 0.01
)
self.marginal_actor_linear_list[agent_id].bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 84, 84):
raise Exception("input to model is not as expected, check!")
        # inputs.shape == (self.num_agents, self.num_inputs_per_agent, 84, 84)
        x = self.cnn(inputs)
        # x.shape == (self.num_agents, final_cnn_channels, 4, 4)
        x = x.view(1, -1)
        # x.shape == (batch_dim, self.cnn_output_dim * 2)
x = self.before_lstm_mlp(x)
# x.shape == (batch_dim, self.lstm_in_dim)
batch_dim = 1
if x.shape != (1, self.lstm_in_dim):
print("x.shape: {}".format(x.shape))
raise Exception("output of model is not as expected, check!")
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [batch_dim, 1, state_repr_length]
x = x.squeeze(1)
# x.shape = [batch_dim, state_repr_length]
final_state = x + self.after_lstm_mlp(x)
        # final_state.shape = [batch=1, state_repr_length]
marginal_actor_linear_output_list = [
self.marginal_actor_linear_list[agent_id](final_state)
for agent_id in range(self.num_agents)
]
# list with each element of size [batch_dim, self.num_outputs]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (final_state.unsqueeze(0), hidden[1])
joint_actor_logits = self.joint_actor_linear(final_state).view(
batch_dim, self.num_outputs, self.num_outputs
)
outer_product_marginal_logits = marginal_actor_linear_output_list[0].unsqueeze(
2
) * marginal_actor_linear_output_list[1].unsqueeze(1)
assert outer_product_marginal_logits.shape == (
batch_dim,
self.num_outputs,
self.num_outputs,
)
combined_logits = joint_actor_logits + outer_product_marginal_logits
        # Length-one lists as this is a central agent
return {
"logit_all": combined_logits.view(
batch_dim, self.num_outputs * self.num_outputs
),
"value_all": self.critic_linear(final_state), # shape: [batch_dim,1]
"hidden_all": hidden,
"joint_logit_all": True,
}
class A3CLSTMNStepComCoordinatedActionsEgoVision(nn.Module):
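    # Decentralized agents with explicit communication: each agent encodes its own
    # 84x84 observation (plus a learned agent embedding) with a shared CNN + LSTM,
    # then exchanges a "talk" symbol and a "reply" symbol, updating its state after
    # each exchange via an MLP. Actor heads (shared or per-agent) emit either plain
    # logits or, with coordinate_actions, num_outputs * coordinate_actions_dim
    # coordinated logits plus randomization logits derived from the reply messages.
    # The critic is per-agent unless central_critic is set.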
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
talk_embed_length: int,
agent_num_embed_length: int,
reply_embed_length: int,
num_talk_symbols: int,
num_reply_symbols: int,
turn_off_communication: bool,
coordinate_actions: bool,
coordinate_actions_dim: Optional[int] = None,
central_critic: bool = False,
separate_actor_weights: bool = False,
final_cnn_channels: int = 128,
):
super(A3CLSTMNStepComCoordinatedActionsEgoVision, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.turn_off_communication = turn_off_communication
self.central_critic = central_critic
self.num_inputs_per_agent = num_inputs_per_agent
self.num_agents = num_agents
self.num_talk_symbols = num_talk_symbols
self.num_reply_symbols = num_reply_symbols
self.separate_actor_weights = separate_actor_weights
self.coordinate_actions_dim = (
self.num_outputs
if coordinate_actions_dim is None
else coordinate_actions_dim
)
self.coordinate_actions = coordinate_actions
# input to conv is (num_agents, self.num_inputs_per_agent, 84, 84)
self.cnn = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
self.num_inputs_per_agent, 16, 5, stride=1, padding=2
),
),
("maxpool1", nn.MaxPool2d(2, 2)),
("relu1", nn.ReLU(inplace=True)),
# shape =
("conv2", nn.Conv2d(16, 16, 5, stride=1, padding=1)),
("maxpool2", nn.MaxPool2d(2, 2)),
("relu2", nn.ReLU(inplace=True)),
# shape =
("conv3", nn.Conv2d(16, 32, 4, stride=1, padding=1)),
("maxpool3", nn.MaxPool2d(2, 2)),
("relu3", nn.ReLU(inplace=True)),
# shape =
("conv4", nn.Conv2d(32, 64, 3, stride=1, padding=1)),
("maxpool4", nn.MaxPool2d(2, 2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (4, 4)
(
"conv5",
nn.Conv2d(64, final_cnn_channels, 3, stride=1, padding=1),
),
("maxpool5", nn.MaxPool2d(2, 2)),
("relu5", nn.ReLU(inplace=True)),
# shape = (2, 2)
]
)
)
# Vocab embed
self.talk_embeddings = nn.Embedding(num_talk_symbols, talk_embed_length)
self.reply_embeddings = nn.Embedding(num_reply_symbols, reply_embed_length)
self.talk_symbol_classifier = nn.Linear(state_repr_length, num_talk_symbols)
self.reply_symbol_classifier = nn.Linear(state_repr_length, num_reply_symbols)
# Agent embed
self.agent_num_embeddings = nn.Parameter(
torch.rand(self.num_agents, agent_num_embed_length)
)
# LSTM
self.lstm = nn.LSTM(
final_cnn_channels * 4 + agent_num_embed_length,
state_repr_length,
batch_first=True,
)
# Belief update MLP
state_and_talk_rep_len = state_repr_length + talk_embed_length * (
num_agents - 1
)
self.after_talk_mlp = nn.Sequential(
nn.Linear(state_and_talk_rep_len, state_and_talk_rep_len),
nn.ReLU(inplace=True),
nn.Linear(state_and_talk_rep_len, state_repr_length),
nn.ReLU(inplace=True),
)
state_and_reply_rep_len = state_repr_length + reply_embed_length * (
num_agents - 1
)
self.after_reply_mlp = nn.Sequential(
nn.Linear(state_and_reply_rep_len, state_and_reply_rep_len),
nn.ReLU(inplace=True),
nn.Linear(state_and_reply_rep_len, state_repr_length),
nn.ReLU(inplace=True),
)
if coordinate_actions:
# Randomization MLP
self.to_randomization_logits = nn.Sequential(
nn.Linear(self.num_agents * reply_embed_length, 64),
nn.ReLU(inplace=True),
nn.Linear(64, 64),
nn.ReLU(inplace=True),
nn.Linear(64, self.coordinate_actions_dim),
)
# self.marginal_linear_actor = nn.Linear(state_repr_length, self.num_outputs)
# self.marginal_linear_actor.weight.data = norm_col_init(
# self.marginal_linear_actor.weight.data, 0.01
# )
# self.marginal_linear_actor.bias.data.fill_(0)
# Linear actor
self.actor_linear = None
if coordinate_actions:
if separate_actor_weights:
self.actor_linear_list = nn.ModuleList(
[
nn.Linear(
state_repr_length,
self.num_outputs * self.coordinate_actions_dim,
)
for _ in range(2)
]
)
else:
self.actor_linear = nn.Linear(
state_repr_length, self.num_outputs * self.coordinate_actions_dim
)
else:
assert not separate_actor_weights
self.actor_linear = nn.Linear(state_repr_length, self.num_outputs)
if self.actor_linear is not None:
self.actor_linear.weight.data = norm_col_init(
self.actor_linear.weight.data, 0.01
)
self.actor_linear.bias.data.fill_(0)
else:
for al in self.actor_linear_list:
al.weight.data = norm_col_init(al.weight.data, 0.01)
al.bias.data.fill_(0)
# Linear critic
if self.central_critic:
self.critic_linear = nn.Linear(state_repr_length * self.num_agents, 1)
else:
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
self.cnn._modules["conv5"].weight.data.mul_(relu_gain)
self.talk_symbol_classifier.weight.data = norm_col_init(
self.talk_symbol_classifier.weight.data, 0.01
)
self.talk_symbol_classifier.bias.data.fill_(0)
self.reply_symbol_classifier.weight.data = norm_col_init(
self.reply_symbol_classifier.weight.data, 0.01
)
self.reply_symbol_classifier.bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 84, 84):
raise Exception("input to model is not as expected, check!")
x = self.cnn(inputs)
# x.shape == (2, 128, 2, 2)
x = x.view(x.size(0), -1)
# x.shape = [num_agents, 512]
x = torch.cat((x, self.agent_num_embeddings), dim=1)
# x.shape = [num_agents, 512 + agent_num_embed_length]
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [num_agents, 1, state_repr_length]
# hidden[0].shape == [1, num_agents, state_repr_length]
# hidden[1].shape == [1, num_agents, state_repr_length]
x = x.squeeze(1)
# x.shape = [num_agents, state_repr_length]
talk_logits = self.talk_symbol_classifier(x)
talk_probs = F.softmax(talk_logits, dim=1)
# talk_probs.shape = [num_agents, num_talk_symbols]
talk_outputs = torch.mm(talk_probs, self.talk_embeddings.weight)
# talk_outputs.shape = [num_agents, talk_embed_length]
if not self.turn_off_communication:
talk_heard_per_agent = _unfold_communications(talk_outputs)
else:
talk_heard_per_agent = torch.cat(
[talk_outputs] * (self.num_agents - 1), dim=1
)
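        # talk_heard_per_agent.shape = [num_agents, (num_agents - 1) * talk_embed_length];
        # with communication on, each agent receives the other agents' messages, otherwise
        # its own message is repeated in their place.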
state_talk_repr = x + self.after_talk_mlp(
torch.cat((x, talk_heard_per_agent), dim=1)
)
        # state_talk_repr.shape = [num_agents, state_repr_length]
reply_logits = self.reply_symbol_classifier(state_talk_repr)
reply_probs = F.softmax(reply_logits, dim=1)
# reply_probs.shape = [num_agents, num_reply_symbols]
reply_outputs = torch.mm(reply_probs, self.reply_embeddings.weight)
# reply_outputs.shape = [num_agents, reply_embed_length]
if not self.turn_off_communication:
reply_heard_per_agent = _unfold_communications(reply_outputs)
else:
reply_heard_per_agent = torch.cat(
[reply_outputs] * (self.num_agents - 1), dim=1
)
state_talk_reply_repr = state_talk_repr + self.after_reply_mlp(
torch.cat((state_talk_repr, reply_heard_per_agent), dim=1)
)
# state_talk_reply_repr.shape = [num_agents, state_repr_length]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (state_talk_reply_repr.unsqueeze(0), hidden[1])
if self.central_critic:
value_all = self.critic_linear(
torch.cat(
[
state_talk_reply_repr,
_unfold_communications(state_talk_reply_repr),
],
dim=1,
)
)
else:
value_all = self.critic_linear(state_talk_reply_repr)
to_return = {
"value_all": value_all,
"hidden_all": hidden,
"talk_probs": talk_probs,
"reply_probs": reply_probs,
}
if self.coordinate_actions:
if self.num_agents != 2:
to_return["randomization_logits"] = self.to_randomization_logits(
reply_outputs.view(1, -1)
)
else:
to_return["randomization_logits"] = self.to_randomization_logits(
reply_heard_per_agent.view(1, -1)
)
if not self.separate_actor_weights:
logits = self.actor_linear(state_talk_reply_repr)
else:
logits = torch.cat(
[
linear(state_talk_reply_repr[i].unsqueeze(0))
for i, linear in enumerate(self.actor_linear_list)
],
dim=0,
)
# logits = self.actor_linear(
# state_talk_reply_repr
# ) + self.marginal_linear_actor(state_talk_reply_repr).unsqueeze(1).repeat(
# 1, self.coordinate_actions_dim, 1
# ).view(
# self.num_agents, self.num_outputs ** 2
# )
to_return["coordinated_logits"] = logits
else:
to_return["logit_all"] = self.actor_linear(state_talk_reply_repr)
return to_return
class BigA3CLSTMNStepComCoordinatedActionsEgoVision(nn.Module):
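    # Same communication architecture as A3CLSTMNStepComCoordinatedActionsEgoVision
    # but with a wider CNN (32/32/64 channels, four pooling stages, 4x4 spatial output)
    # and a correspondingly larger LSTM input.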
def __init__(
self,
num_inputs_per_agent: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
talk_embed_length: int,
agent_num_embed_length: int,
reply_embed_length: int,
num_talk_symbols: int,
num_reply_symbols: int,
turn_off_communication: bool,
coordinate_actions: bool,
coordinate_actions_dim: Optional[int] = None,
central_critic: bool = False,
separate_actor_weights: bool = False,
final_cnn_channels: int = 64,
):
super(BigA3CLSTMNStepComCoordinatedActionsEgoVision, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
self.turn_off_communication = turn_off_communication
self.central_critic = central_critic
self.num_inputs_per_agent = num_inputs_per_agent
self.num_agents = num_agents
self.num_talk_symbols = num_talk_symbols
self.num_reply_symbols = num_reply_symbols
self.separate_actor_weights = separate_actor_weights
self.coordinate_actions_dim = (
self.num_outputs
if coordinate_actions_dim is None
else coordinate_actions_dim
)
self.coordinate_actions = coordinate_actions
# input to conv is (num_agents, self.num_inputs_per_agent, 84, 84)
self.cnn = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
self.num_inputs_per_agent, 32, 5, stride=1, padding=2
),
),
("maxpool1", nn.MaxPool2d(2, 2)),
("relu1", nn.ReLU(inplace=True)),
# shape =
("conv2", nn.Conv2d(32, 32, 5, stride=1, padding=1)),
("maxpool2", nn.MaxPool2d(2, 2)),
("relu2", nn.ReLU(inplace=True)),
# shape =
("conv3", nn.Conv2d(32, 64, 4, stride=1, padding=1)),
("maxpool3", nn.MaxPool2d(2, 2)),
("relu3", nn.ReLU(inplace=True)),
# shape =
(
"conv4",
nn.Conv2d(64, final_cnn_channels, 3, stride=1, padding=1),
),
("maxpool4", nn.MaxPool2d(2, 2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (4, 4)
]
)
)
# Vocab embed
self.talk_embeddings = nn.Embedding(num_talk_symbols, talk_embed_length)
self.reply_embeddings = nn.Embedding(num_reply_symbols, reply_embed_length)
self.talk_symbol_classifier = nn.Linear(state_repr_length, num_talk_symbols)
self.reply_symbol_classifier = nn.Linear(state_repr_length, num_reply_symbols)
# Agent embed
self.agent_num_embeddings = nn.Parameter(
torch.rand(self.num_agents, agent_num_embed_length)
)
# LSTM
self.lstm = nn.LSTM(
final_cnn_channels * 4 * 4 + agent_num_embed_length,
state_repr_length,
batch_first=True,
)
# Belief update MLP
state_and_talk_rep_len = state_repr_length + talk_embed_length * (
num_agents - 1
)
self.after_talk_mlp = nn.Sequential(
nn.Linear(state_and_talk_rep_len, state_and_talk_rep_len),
nn.ReLU(inplace=True),
nn.Linear(state_and_talk_rep_len, state_repr_length),
nn.ReLU(inplace=True),
)
state_and_reply_rep_len = state_repr_length + reply_embed_length * (
num_agents - 1
)
self.after_reply_mlp = nn.Sequential(
nn.Linear(state_and_reply_rep_len, state_and_reply_rep_len),
nn.ReLU(inplace=True),
nn.Linear(state_and_reply_rep_len, state_repr_length),
nn.ReLU(inplace=True),
)
if coordinate_actions:
# Randomization MLP
self.to_randomization_logits = nn.Sequential(
nn.Linear(self.num_agents * reply_embed_length, 64),
nn.ReLU(inplace=True),
nn.Linear(64, 64),
nn.ReLU(inplace=True),
nn.Linear(64, self.coordinate_actions_dim),
)
# self.marginal_linear_actor = nn.Linear(state_repr_length, self.num_outputs)
# self.marginal_linear_actor.weight.data = norm_col_init(
# self.marginal_linear_actor.weight.data, 0.01
# )
# self.marginal_linear_actor.bias.data.fill_(0)
# Linear actor
self.actor_linear = None
if coordinate_actions:
if separate_actor_weights:
self.actor_linear_list = nn.ModuleList(
[
nn.Linear(
state_repr_length,
self.num_outputs * self.coordinate_actions_dim,
)
for _ in range(2)
]
)
else:
self.actor_linear = nn.Linear(
state_repr_length, self.num_outputs * self.coordinate_actions_dim
)
else:
assert not separate_actor_weights
self.actor_linear = nn.Linear(state_repr_length, self.num_outputs)
if self.actor_linear is not None:
self.actor_linear.weight.data = norm_col_init(
self.actor_linear.weight.data, 0.01
)
self.actor_linear.bias.data.fill_(0)
else:
for al in self.actor_linear_list:
al.weight.data = norm_col_init(al.weight.data, 0.01)
al.bias.data.fill_(0)
# Linear critic
if self.central_critic:
self.critic_linear = nn.Linear(state_repr_length * self.num_agents, 1)
else:
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
self.talk_symbol_classifier.weight.data = norm_col_init(
self.talk_symbol_classifier.weight.data, 0.01
)
self.talk_symbol_classifier.bias.data.fill_(0)
self.reply_symbol_classifier.weight.data = norm_col_init(
self.reply_symbol_classifier.weight.data, 0.01
)
self.reply_symbol_classifier.bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs_per_agent, 84, 84):
raise Exception("input to model is not as expected, check!")
x = self.cnn(inputs)
        # x.shape == (num_agents, final_cnn_channels, 4, 4)
        x = x.view(x.size(0), -1)
        # x.shape = [num_agents, final_cnn_channels * 4 * 4]
        x = torch.cat((x, self.agent_num_embeddings), dim=1)
        # x.shape = [num_agents, final_cnn_channels * 4 * 4 + agent_num_embed_length]
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [num_agents, 1, state_repr_length]
# hidden[0].shape == [1, num_agents, state_repr_length]
# hidden[1].shape == [1, num_agents, state_repr_length]
x = x.squeeze(1)
# x.shape = [num_agents, state_repr_length]
talk_logits = self.talk_symbol_classifier(x)
talk_probs = F.softmax(talk_logits, dim=1)
# talk_probs.shape = [num_agents, num_talk_symbols]
talk_outputs = torch.mm(talk_probs, self.talk_embeddings.weight)
# talk_outputs.shape = [num_agents, talk_embed_length]
if not self.turn_off_communication:
talk_heard_per_agent = _unfold_communications(talk_outputs)
else:
talk_heard_per_agent = torch.cat(
[talk_outputs] * (self.num_agents - 1), dim=1
)
state_talk_repr = x + self.after_talk_mlp(
torch.cat((x, talk_heard_per_agent), dim=1)
)
        # state_talk_repr.shape = [num_agents, state_repr_length]
reply_logits = self.reply_symbol_classifier(state_talk_repr)
reply_probs = F.softmax(reply_logits, dim=1)
# reply_probs.shape = [num_agents, num_reply_symbols]
reply_outputs = torch.mm(reply_probs, self.reply_embeddings.weight)
# reply_outputs.shape = [num_agents, reply_embed_length]
if not self.turn_off_communication:
reply_heard_per_agent = _unfold_communications(reply_outputs)
else:
reply_heard_per_agent = torch.cat(
[reply_outputs] * (self.num_agents - 1), dim=1
)
state_talk_reply_repr = state_talk_repr + self.after_reply_mlp(
torch.cat((state_talk_repr, reply_heard_per_agent), dim=1)
)
# state_talk_reply_repr.shape = [num_agents, state_repr_length]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (state_talk_reply_repr.unsqueeze(0), hidden[1])
if self.central_critic:
value_all = self.critic_linear(
torch.cat(
[
state_talk_reply_repr,
_unfold_communications(state_talk_reply_repr),
],
dim=1,
)
)
else:
value_all = self.critic_linear(state_talk_reply_repr)
to_return = {
"value_all": value_all,
"hidden_all": hidden,
"talk_probs": talk_probs,
"reply_probs": reply_probs,
}
if self.coordinate_actions:
to_return["randomization_logits"] = self.to_randomization_logits(
reply_heard_per_agent.view(1, -1)
)
if not self.separate_actor_weights:
logits = self.actor_linear(state_talk_reply_repr)
else:
logits = torch.cat(
[
linear(state_talk_reply_repr[i].unsqueeze(0))
for i, linear in enumerate(self.actor_linear_list)
],
dim=0,
)
# logits = self.actor_linear(
# state_talk_reply_repr
# ) + self.marginal_linear_actor(state_talk_reply_repr).unsqueeze(1).repeat(
# 1, self.coordinate_actions_dim, 1
# ).view(
# self.num_agents, self.num_outputs ** 2
# )
to_return["coordinated_logits"] = logits
else:
to_return["logit_all"] = self.actor_linear(state_talk_reply_repr)
return to_return
class A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(nn.Module):
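    # Grid-observation counterpart of the communication models above: 15x15 occupancy
    # grids are embedded (mean of active-channel embeddings per cell), encoded by a CNN
    # to a single 1x1 feature per agent, and then follow the same talk/reply
    # communication, coordinated-action, and critic structure.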
def __init__(
self,
num_inputs: int,
action_groups: Tuple[Tuple[str, ...], ...],
num_agents: int,
state_repr_length: int,
talk_embed_length: int,
agent_num_embed_length: int,
reply_embed_length: int,
num_talk_symbols: int,
num_reply_symbols: int,
occupancy_embed_length: int,
turn_off_communication: bool,
coordinate_actions: bool,
coordinate_actions_dim: Optional[int] = None,
central_critic: bool = False,
separate_actor_weights: bool = False,
final_cnn_channels: int = 256,
):
super(A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN, self).__init__()
self.num_outputs = sum(len(x) for x in action_groups)
# assert not turn_off_communication
self.turn_off_communication = turn_off_communication
self.central_critic = central_critic
self.num_inputs = num_inputs
self.num_agents = num_agents
self.num_talk_symbols = num_talk_symbols
self.num_reply_symbols = num_reply_symbols
self.separate_actor_weights = separate_actor_weights
self.coordinate_actions_dim = (
self.num_outputs
if coordinate_actions_dim is None
else coordinate_actions_dim
)
self.occupancy_embed_length = occupancy_embed_length
self.occupancy_embeddings = nn.Embedding(
num_inputs, self.occupancy_embed_length
)
self.coordinate_actions = coordinate_actions
# input to conv is (num_agents, self.occupancy_embed_length, 15, 15)
self.cnn = nn.Sequential(
OrderedDict(
[
("conv1", nn.Conv2d(self.occupancy_embed_length, 32, 3, padding=1)),
("relu1", nn.ReLU(inplace=True)),
# shape = (15, 15)
("conv2", nn.Conv2d(32, 64, 3, stride=2)),
("relu2", nn.ReLU(inplace=True)),
# shape = (7, 7)
("conv3", nn.Conv2d(64, 128, 3, stride=2)),
("relu3", nn.ReLU(inplace=True)),
# shape = (3, 3)
("conv4", nn.Conv2d(128, final_cnn_channels, 3, stride=2)),
("relu4", nn.ReLU(inplace=True)),
# shape = (1, 1); Stride doesn't matter above
]
)
)
# Vocab embed
self.talk_embeddings = nn.Embedding(num_talk_symbols, talk_embed_length)
self.reply_embeddings = nn.Embedding(num_reply_symbols, reply_embed_length)
self.talk_symbol_classifier = nn.Linear(state_repr_length, num_talk_symbols)
self.reply_symbol_classifier = nn.Linear(state_repr_length, num_reply_symbols)
# Agent embed
self.agent_num_embeddings = nn.Parameter(
torch.rand(self.num_agents, agent_num_embed_length)
)
# LSTM
self.lstm = nn.LSTM(
final_cnn_channels * 1 + agent_num_embed_length,
state_repr_length,
batch_first=True,
)
# Belief update MLP
state_and_talk_rep_len = state_repr_length + talk_embed_length * (
num_agents - 1
)
self.after_talk_mlp = nn.Sequential(
nn.Linear(state_and_talk_rep_len, state_and_talk_rep_len),
nn.ReLU(inplace=True),
nn.Linear(state_and_talk_rep_len, state_repr_length),
nn.ReLU(inplace=True),
)
state_and_reply_rep_len = state_repr_length + reply_embed_length * (
num_agents - 1
)
self.after_reply_mlp = nn.Sequential(
nn.Linear(state_and_reply_rep_len, state_and_reply_rep_len),
nn.ReLU(inplace=True),
nn.Linear(state_and_reply_rep_len, state_repr_length),
nn.ReLU(inplace=True),
)
if coordinate_actions:
# Randomization MLP
self.to_randomization_logits = nn.Sequential(
nn.Linear(self.num_agents * reply_embed_length, 64),
nn.ReLU(inplace=True),
nn.Linear(64, 64),
nn.ReLU(inplace=True),
nn.Linear(64, self.coordinate_actions_dim),
)
# self.marginal_linear_actor = nn.Linear(state_repr_length, self.num_outputs)
# self.marginal_linear_actor.weight.data = norm_col_init(
# self.marginal_linear_actor.weight.data, 0.01
# )
# self.marginal_linear_actor.bias.data.fill_(0)
# Linear actor
self.actor_linear = None
if coordinate_actions:
if separate_actor_weights:
self.actor_linear_list = nn.ModuleList(
[
nn.Linear(
state_repr_length,
self.num_outputs * self.coordinate_actions_dim,
)
for _ in range(2)
]
)
else:
self.actor_linear = nn.Linear(
state_repr_length, self.num_outputs * self.coordinate_actions_dim
)
else:
assert not separate_actor_weights
self.actor_linear = nn.Linear(state_repr_length, self.num_outputs)
if self.actor_linear is not None:
self.actor_linear.weight.data = norm_col_init(
self.actor_linear.weight.data, 0.01
)
self.actor_linear.bias.data.fill_(0)
else:
for al in self.actor_linear_list:
al.weight.data = norm_col_init(al.weight.data, 0.01)
al.bias.data.fill_(0)
# Linear critic
if self.central_critic:
self.critic_linear = nn.Linear(state_repr_length * self.num_agents, 1)
else:
self.critic_linear = nn.Linear(state_repr_length, 1)
# Setting initial weights
self.apply(weights_init)
relu_gain = nn.init.calculate_gain("relu")
self.cnn._modules["conv1"].weight.data.mul_(relu_gain)
self.cnn._modules["conv2"].weight.data.mul_(relu_gain)
self.cnn._modules["conv3"].weight.data.mul_(relu_gain)
self.cnn._modules["conv4"].weight.data.mul_(relu_gain)
self.talk_symbol_classifier.weight.data = norm_col_init(
self.talk_symbol_classifier.weight.data, 0.01
)
self.talk_symbol_classifier.bias.data.fill_(0)
self.reply_symbol_classifier.weight.data = norm_col_init(
self.reply_symbol_classifier.weight.data, 0.01
)
self.reply_symbol_classifier.bias.data.fill_(0)
self.train()
def forward(
self,
inputs: torch.FloatTensor,
hidden: Optional[torch.FloatTensor],
agent_rotations: Sequence[int],
):
if inputs.shape != (self.num_agents, self.num_inputs, 15, 15):
raise Exception("input to model is not as expected, check!")
inputs = inputs.float()
# inputs.shape == (self.num_agents, self.num_inputs, 15, 15)
inputs = inputs.permute(0, 2, 3, 1).contiguous()
original_shape = inputs.shape
# original_shape == (self.num_agents, 15, 15, self.num_inputs)
scaling_factor = torch.reciprocal(
(inputs == 1).sum(dim=3).unsqueeze(3).float() + 1e-3
)
# scaling_factor.shape == (2, 15, 15, 1)
inputs = inputs.view(-1, self.num_inputs)
inputs = inputs.matmul(self.occupancy_embeddings.weight)
inputs = inputs.view(
original_shape[0],
original_shape[1],
original_shape[2],
self.occupancy_embed_length,
)
x = torch.mul(inputs, scaling_factor.expand_as(inputs))
# x.shape == (self.num_agents, 15, 15, self.occupancy_embed_length)
x = x.permute(0, 3, 1, 2).contiguous()
# x.shape == (self.num_agents, self.occupancy_embed_length, 15, 15)
x = self.cnn(x)
# x.shape == (2, 256, 1, 1)
x = x.view(x.size(0), -1)
# x.shape = [num_agents, 256]
x = torch.cat((x, self.agent_num_embeddings), dim=1)
# x.shape = [num_agents, 256 + agent_num_embed_length]
x, hidden = self.lstm(x.unsqueeze(1), hidden)
# x.shape = [num_agents, 1, state_repr_length]
# hidden[0].shape == [1, num_agents, state_repr_length]
# hidden[1].shape == [1, num_agents, state_repr_length]
x = x.squeeze(1)
# x.shape = [num_agents, state_repr_length]
talk_logits = self.talk_symbol_classifier(x)
talk_probs = F.softmax(talk_logits, dim=1)
# talk_probs.shape = [num_agents, num_talk_symbols]
talk_outputs = torch.mm(talk_probs, self.talk_embeddings.weight)
# talk_outputs.shape = [num_agents, talk_embed_length]
if not self.turn_off_communication:
talk_heard_per_agent = _unfold_communications(talk_outputs)
else:
talk_heard_per_agent = torch.cat(
[talk_outputs] * (self.num_agents - 1), dim=1
)
state_talk_repr = x + self.after_talk_mlp(
torch.cat((x, talk_heard_per_agent), dim=1)
)
        # state_talk_repr.shape = [num_agents, state_repr_length]
reply_logits = self.reply_symbol_classifier(state_talk_repr)
reply_probs = F.softmax(reply_logits, dim=1)
# reply_probs.shape = [num_agents, num_reply_symbols]
reply_outputs = torch.mm(reply_probs, self.reply_embeddings.weight)
# reply_outputs.shape = [num_agents, reply_embed_length]
if not self.turn_off_communication:
reply_heard_per_agent = _unfold_communications(reply_outputs)
else:
reply_heard_per_agent = torch.cat(
[reply_outputs] * (self.num_agents - 1), dim=1
)
state_talk_reply_repr = state_talk_repr + self.after_reply_mlp(
torch.cat((state_talk_repr, reply_heard_per_agent), dim=1)
)
# state_talk_reply_repr.shape = [num_agents, state_repr_length]
# Strangely we have to unsqueeze(0) instead of unsqueeze(1) here.
# This seems to be because the LSTM is not expecting its hidden
# state to have the batch first despite the batch_first=True parameter
# making it expect its input to have the batch first.
hidden = (state_talk_reply_repr.unsqueeze(0), hidden[1])
if self.central_critic:
value_all = self.critic_linear(
torch.cat(
[
state_talk_reply_repr,
_unfold_communications(state_talk_reply_repr),
],
dim=1,
)
)
else:
value_all = self.critic_linear(state_talk_reply_repr)
to_return = {
"value_all": value_all,
"hidden_all": hidden,
"talk_probs": talk_probs,
"reply_probs": reply_probs,
}
if self.coordinate_actions:
if self.num_agents != 2:
to_return["randomization_logits"] = self.to_randomization_logits(
reply_outputs.view(1, -1)
)
else:
to_return["randomization_logits"] = self.to_randomization_logits(
reply_heard_per_agent.view(1, -1)
)
if not self.separate_actor_weights:
logits = self.actor_linear(state_talk_reply_repr)
else:
logits = torch.cat(
[
linear(state_talk_reply_repr[i].unsqueeze(0))
for i, linear in enumerate(self.actor_linear_list)
],
dim=0,
)
# logits = self.actor_linear(
# state_talk_reply_repr
# ) + self.marginal_linear_actor(state_talk_reply_repr).unsqueeze(1).repeat(
# 1, self.coordinate_actions_dim, 1
# ).view(
# self.num_agents, self.num_outputs ** 2
# )
to_return["coordinated_logits"] = logits
else:
to_return["logit_all"] = self.actor_linear(state_talk_reply_repr)
return to_return
| cordial-sync-master | rl_multi_agent/models.py |
import itertools
import math
import warnings
from typing import Tuple, Dict, List, Any, Union
import matplotlib as mpl
import constants
from rl_ai2thor.ai2thor_environment import AI2ThorEnvironmentWithGraph
from rl_ai2thor.ai2thor_episodes import MultiAgentAI2ThorEpisode
from rl_ai2thor.ai2thor_utils import manhattan_dists_between_positions
from .furnmove_episodes import are_actions_coordinated, coordination_type_tensor
mpl.use("Agg", force=False)
class JointNavigationEpisode(MultiAgentAI2ThorEpisode):
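    # Episode in which every agent navigates (MoveAhead/Rotate/Pass) until the goal
    # object is visible to all agents simultaneously. The expert policy moves each
    # agent along the shortest path to its closest target location, and the oracle
    # length is the largest such initial path length over agents.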
def __init__(
self,
env: AI2ThorEnvironmentWithGraph,
task_data: Dict[str, Any],
max_steps: int,
**kwargs
):
super(JointNavigationEpisode, self).__init__(
env=env, task_data=task_data, max_steps=max_steps, **kwargs
)
self.initial_agent_metadata = env.get_all_agent_metadata()
self.target_key_groups = task_data["target_key_groups"]
self.object_id = task_data["goal_obj_id"]
self.target_keys_set = set(o for group in self.target_key_groups for o in group)
self.oracle_length = self.calculate_oracle_length()
@classmethod
def class_available_action_groups(cls, **kwargs) -> Tuple[Tuple[str, ...], ...]:
actions = ("MoveAhead", "RotateLeft", "RotateRight", "Pass")
return (actions,)
def _is_goal_object_visible(self, agent_id: int) -> bool:
return self.task_data["goal_obj_id"] in [
o["objectId"] for o in self.environment.visible_objects(agent_id=agent_id)
]
def goal_visibility(self) -> List[bool]:
return [
self._is_goal_object_visible(agentId)
for agentId in range(self.environment.num_agents)
]
def goal_visibility_int(self) -> List[int]:
return [
int(self._is_goal_object_visible(agentId))
for agentId in range(self.environment.num_agents)
]
def goal_visible_to_all(self) -> bool:
return all(self.goal_visibility())
def info(self):
return {
**super(JointNavigationEpisode, self).info(),
"accuracy": 1 * self.goal_visible_to_all(),
"goal_visible_to_one_agent": 1 * (sum(self.goal_visibility()) == 1),
}
def _step(self, action_as_int: int, agent_id: int) -> Dict[str, Any]:
action = self.available_actions[action_as_int]
action_dict = {"action": action, "agentId": agent_id}
self.environment.step(action_dict)
return {
"action": action_as_int,
"action_success": self.environment.last_event.events[agent_id].metadata[
"lastActionSuccess"
],
}
def _closest_target(self, source_key):
closest_target = None
closest_steps = float("inf")
for target_key in self.target_keys_set:
nsteps = self.environment.shortest_path_length(
source_state_key=source_key, goal_state_key=target_key
)
if closest_target is None or nsteps < closest_steps:
closest_steps = nsteps
closest_target = target_key
return closest_target
def next_expert_action(self) -> Tuple[Union[int, None], ...]:
expert_actions = []
is_goal_visible = self.goal_visibility()
for agent_id in range(self.environment.num_agents):
source_key = self.environment.get_key(
self.environment.last_event.events[agent_id].metadata["agent"]
)
if is_goal_visible[agent_id] or source_key in self.target_keys_set:
action = "Pass"
else:
action = self.environment.shortest_path_next_action(
source_key, self._closest_target(source_key)
)
expert_actions.append(self.available_actions.index(action))
return tuple(expert_actions)
def reached_terminal_state(self) -> bool:
return self.goal_visible_to_all()
def calculate_oracle_length(self):
path_lengths = []
for agent_id in range(self.environment.num_agents):
source_state_key = self.environment.get_key(
self.initial_agent_metadata[agent_id]
)
# goal_state_key = self.environment.get_key(self.target_metadata[agent_id])
path_length_single = self.environment.shortest_path_length(
source_state_key, self._closest_target(source_state_key)
)
path_lengths.append(path_length_single)
return max(path_lengths)
class FurnLiftEpisode(JointNavigationEpisode):
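    # Navigation episode extended with a "Pickup" action: the episode succeeds only
    # when the goal object is visible to all agents and all of them issue Pickup in
    # the same step (the object is then teleported upwards to mark the lift);
    # mis-coordinated pickups are tracked in the episode statistics.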
def __init__(
self,
env: AI2ThorEnvironmentWithGraph,
task_data: Dict[str, Any],
max_steps: int,
**kwargs
):
super().__init__(env=env, task_data=task_data, max_steps=max_steps, **kwargs)
self._item_successfully_picked_up = False
self._jointly_visible_but_not_picked = 0
self._picked_but_not_jointly_visible = 0
@classmethod
def class_available_action_groups(cls, **kwargs) -> Tuple[Tuple[str, ...], ...]:
actions = ("MoveAhead", "RotateLeft", "RotateRight", "Pass", "Pickup")
return (actions,)
def info(self):
return {
**super(JointNavigationEpisode, self).info(),
"accuracy": 1 * self._item_successfully_picked_up,
"jointly_visible_but_not_picked": self._jointly_visible_but_not_picked,
"picked_but_not_jointly_visible": self._picked_but_not_jointly_visible,
}
def multi_step(self, actions_as_ints: Tuple[int, ...]) -> List[Dict[str, Any]]:
assert not self.is_paused() and not self.is_complete()
pickup_index = self.available_actions.index("Pickup")
visibility = self.goal_visibility()
visible_to_all = all(visibility)
agent_tried_pickup = [i == pickup_index for i in actions_as_ints]
all_pick_up = all(agent_tried_pickup)
self._jointly_visible_but_not_picked += visible_to_all * (not all_pick_up)
if visible_to_all and all(i == pickup_index for i in actions_as_ints):
for i in range(self.environment.num_agents):
self._increment_num_steps_taken_in_episode()
step_results = [
{
"action": pickup_index,
"action_success": True,
"goal_visible": True,
"pickup_action_taken": True,
}
for _ in range(self.environment.num_agents)
]
object = self.environment.get_object_by_id(self.object_id, agent_id=0)
self.environment.step(
{
"action": "TeleportObject",
**object["position"],
"objectId": self.object_id,
"y": object["position"]["y"] + 1.0,
"rotation": object["rotation"],
"agentId": 0,
}
)
self._item_successfully_picked_up = True
else:
step_results = []
for i in range(self._env.num_agents):
self._increment_num_steps_taken_in_episode()
step_results.append(self._step(actions_as_ints[i], agent_id=i))
step_results[-1]["goal_visible"] = visibility[i]
step_results[-1]["pickup_action_taken"] = agent_tried_pickup[i]
return step_results
def _step(self, action_as_int: int, agent_id: int) -> Dict[str, Any]:
action = self.available_actions[action_as_int]
if action == "Pickup":
self._picked_but_not_jointly_visible += 1
metadata = self.environment.last_event.events[agent_id].metadata
metadata["lastAction"] = "Pickup"
metadata["lastActionSuccess"] = False
else:
action_dict = {"action": action, "agentId": agent_id}
self.environment.step(action_dict)
return {
"action": action_as_int,
"action_success": self.environment.last_event.events[agent_id].metadata[
"lastActionSuccess"
],
}
def next_expert_action(self) -> Tuple[Union[int, None], ...]:
expert_actions = []
is_goal_visible = self.goal_visibility()
if all(is_goal_visible):
return tuple(
[self.available_actions.index("Pickup")] * self.environment.num_agents
)
for agent_id in range(self.environment.num_agents):
source_key = self.environment.get_key(
self.environment.last_event.events[agent_id].metadata["agent"]
)
if is_goal_visible[agent_id] or source_key in self.target_keys_set:
action = "Pass"
else:
action = self.environment.shortest_path_next_action(
source_key, self._closest_target(source_key)
)
expert_actions.append(self.available_actions.index(action))
return tuple(expert_actions)
def reached_terminal_state(self) -> bool:
return self._item_successfully_picked_up
class FurnLiftNApartStateEpisode(FurnLiftEpisode):
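    # FurnLift variant that also requires the agents to be spread out: a joint pickup
    # only succeeds if the object is visible to all agents and all pairwise Manhattan
    # distances are at least min_dist_between_agents_to_pickup. Adds shaped step,
    # failed-action, and premature-pickup penalties, and reports distance- and
    # SPL-style metrics in info().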
def __init__(
self,
env: AI2ThorEnvironmentWithGraph,
task_data: Dict[str, Any],
max_steps: int,
**kwargs
):
super().__init__(env=env, task_data=task_data, max_steps=max_steps, **kwargs)
self._min_dist_between_agents_to_pickup = kwargs[
"min_dist_between_agents_to_pickup"
]
self._pickupable_but_not_picked = 0
self._picked_but_not_pickupable = 0
self._picked_but_not_pickupable_distance = 0
self._picked_but_not_pickupable_visibility = 0
self.total_reward = 0.0
self.coordinated_action_checker = are_actions_coordinated
goal_object = self.environment.get_object_by_id(self.object_id, agent_id=0)
self.object_location = dict(
x=round(goal_object["position"]["x"], 2),
z=round(goal_object["position"]["z"], 2),
)
# Initial agent locations, to calculate the expert path lengths
self._initial_agent_locations = [
self.environment.get_agent_location(i)
for i in range(self.environment.num_agents)
]
@staticmethod
def location_l1_dist(loc1, loc2):
return abs(loc1["x"] - loc2["x"]) + abs(loc1["z"] - loc2["z"])
def info(self):
nagents = self.environment.num_agents
_final_l1_dist = [
self.location_l1_dist(
self.object_location, self.environment.get_agent_location(i)
)
for i in range(nagents)
]
mean_final_agent_l1_distance_from_target = round(
sum(_final_l1_dist) / len(_final_l1_dist), 2
)
_init_l1_dist = [
self.location_l1_dist(self.object_location, ag_loc)
for ag_loc in self._initial_agent_locations
]
mean_initial_agent_manhattan_steps_from_target = (
sum(_init_l1_dist) / len(_init_l1_dist)
) / self.environment.grid_size
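        # "spl_manhattan" below is an SPL-style metric (success weighted by relative path
        # length), where the shortest-path length is approximated by the initial Manhattan
        # distance to the target in grid steps and the agents' path length by the mean
        # number of steps taken per agent.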
return {
**super(JointNavigationEpisode, self).info(),
"accuracy": 1 * self._item_successfully_picked_up,
"pickupable_but_not_picked": self._pickupable_but_not_picked,
"picked_but_not_pickupable": self._picked_but_not_pickupable,
"picked_but_not_pickupable_distance": self._picked_but_not_pickupable_distance,
"picked_but_not_pickupable_visibility": self._picked_but_not_pickupable_visibility,
"reward": self.total_reward,
"initial_manhattan_steps": mean_initial_agent_manhattan_steps_from_target,
"final_manhattan_distance_from_target": mean_final_agent_l1_distance_from_target,
"spl_manhattan": int(self._item_successfully_picked_up)
* (
(mean_initial_agent_manhattan_steps_from_target + 0.0001)
/ (
max(
mean_initial_agent_manhattan_steps_from_target,
(self.num_steps_taken_in_episode() / nagents),
)
+ 0.0001
)
),
}
def manhattan_dists_between_agents(self):
return manhattan_dists_between_positions(
[
self.environment.get_agent_location(i)
for i in range(self.environment.num_agents)
],
self.environment.grid_size,
)
def multi_step(self, actions_as_ints: Tuple[int, ...]) -> List[Dict[str, Any]]:
assert not self.is_paused() and not self.is_complete()
pickup_index = self.available_actions.index("Pickup")
visibility = self.goal_visibility()
visible_to_all = all(visibility)
agent_tried_pickup = [i == pickup_index for i in actions_as_ints]
all_pick_up = all(agent_tried_pickup)
pairwise_distances = self.manhattan_dists_between_agents()
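        # A joint pickup is only allowed when the object is visible to every agent, every
        # agent chooses Pickup, and all pairwise agent distances (in grid steps) are at
        # least the configured minimum.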
sufficiently_far = [
all(x >= self._min_dist_between_agents_to_pickup for x in dists)
for dists in pairwise_distances
]
all_sufficiently_far = all(sufficiently_far)
self._pickupable_but_not_picked += (
visible_to_all * all_sufficiently_far * (not all_pick_up)
)
if visible_to_all and all_sufficiently_far and all_pick_up:
for i in range(self.environment.num_agents):
self._increment_num_steps_taken_in_episode()
step_results = [
{
"action": pickup_index,
"action_success": True,
"goal_visible": True,
"pickup_action_taken": True,
"mutually_distant": True,
"reward": 1.0,
}
for _ in range(self.environment.num_agents)
]
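            # A successful joint pickup is simulated by teleporting the object 1.0m straight
            # up while keeping its x/z position and rotation fixed.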
object = self.environment.get_object_by_id(self.object_id, agent_id=0)
self.environment.step(
{
"action": "TeleportObject",
**object["position"],
"objectId": self.object_id,
"y": object["position"]["y"] + 1.0,
"rotation": object["rotation"],
"agentId": 0,
}
)
self._item_successfully_picked_up = True
else:
step_results = []
for i in range(self._env.num_agents):
self._increment_num_steps_taken_in_episode()
step_results.append(self._step(actions_as_ints[i], agent_id=i))
step_results[-1]["goal_visible"] = visibility[i]
step_results[-1]["pickup_action_taken"] = agent_tried_pickup[i]
self._picked_but_not_pickupable += agent_tried_pickup[i]
self._picked_but_not_pickupable_distance += (
agent_tried_pickup[i] and not all_sufficiently_far
)
self._picked_but_not_pickupable_visibility += (
agent_tried_pickup[i] and not visible_to_all
)
step_results[-1]["mutually_distant"] = sufficiently_far[i]
self.total_reward += sum(sr["reward"] for sr in step_results)
return step_results
def _step(self, action_as_int: int, agent_id: int) -> Dict[str, Any]:
        STEP_PENALTY = -0.01
        FAILED_ACTION_PENALTY = -0.02
        reward = STEP_PENALTY
action = self.available_actions[action_as_int]
if action == "Pickup":
self._picked_but_not_jointly_visible += 1
metadata = self.environment.last_event.events[agent_id].metadata
metadata["lastAction"] = "Pickup"
metadata["lastActionSuccess"] = False
reward += -0.1
else:
action_dict = {"action": action, "agentId": agent_id}
self.environment.step(action_dict)
action_success = self.environment.last_event.events[agent_id].metadata[
"lastActionSuccess"
]
if not action_success:
reward += FAILED_ACTION_PENALTY
return {
"reward": reward,
"action": action_as_int,
"action_success": action_success,
}
def next_expert_action(self) -> Tuple[Union[int, None], ...]:
expert_actions = []
visibility = self.goal_visibility()
visible_to_all = all(visibility)
pairwise_distances = self.manhattan_dists_between_agents()
sufficiently_far = [
all(x >= self._min_dist_between_agents_to_pickup for x in dists)
for dists in pairwise_distances
]
all_sufficiently_far = all(sufficiently_far)
agent_keys = [
self.environment.get_key(
self.environment.last_event.events[agent_id].metadata["agent"]
)
for agent_id in range(self.environment.num_agents)
]
# In the case that we have reached the expert targets but the expert targets
# fail to be correct (as can happen if, for example, two agents intersect and
        # block one another's vision) then we should remove the relevant agent_key_tuple
# from the set of good target ind tuples and have the agents move somewhere else.
if all(k in self.target_keys_set for k in agent_keys):
agent_key_tuple = tuple(sorted(agent_keys))
if agent_key_tuple in self.target_key_groups and (
not visible_to_all or not all_sufficiently_far
):
self.target_key_groups.remove(agent_key_tuple)
if visible_to_all and all_sufficiently_far:
return tuple(
[self.available_actions.index("Pickup")] * self.environment.num_agents
)
if len(self.target_key_groups) == 0:
return None
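        # Memoize shortest-path lengths from each agent to each candidate target key; these
        # are reused while scoring the candidate assignments below.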
_dist_cache = {}
def agent_to_dist_to_target(agent_id, target_key):
key = (agent_id, target_key)
if key not in _dist_cache:
_dist_cache[key] = self.environment.shortest_path_length(
source_state_key=agent_keys[agent_id], goal_state_key=target_key
)
return _dist_cache[key]
best_ordered_target_group = None
best_path_length = None
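        # Choose the assignment of agents to target positions minimizing total shortest-path
        # length, considering every ordering of each remaining valid target group.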
for good_target_group in self.target_key_groups:
for ordered_target_group in itertools.permutations(
good_target_group, len(good_target_group)
):
path_length = sum(
agent_to_dist_to_target(agent_id, target_key)
for agent_id, target_key in enumerate(ordered_target_group)
)
if best_ordered_target_group is None or best_path_length > path_length:
best_path_length = path_length
best_ordered_target_group = ordered_target_group
for agent_id in range(self.environment.num_agents):
target_key = best_ordered_target_group[agent_id]
if agent_keys[agent_id] == target_key:
action = "Pass"
else:
action = self.environment.shortest_path_next_action(
agent_keys[agent_id], target_key
)
expert_actions.append(self.available_actions.index(action))
return tuple(expert_actions)
def reached_terminal_state(self) -> bool:
return self._item_successfully_picked_up
def coordination_type_tensor(self):
return coordination_type_tensor(
self.environment, self.available_actions, self.coordinated_action_checker
)
class FurnLiftNApartStateLoggingEpisode(FurnLiftNApartStateEpisode):
def __init__(
self,
env: AI2ThorEnvironmentWithGraph,
task_data: Dict[str, Any],
max_steps: int,
**kwargs
):
super().__init__(env=env, task_data=task_data, max_steps=max_steps, **kwargs)
# Extra logging of step number at which an agent first saw the target
self._first_view_of_target = [self.max_steps + 1] * self.environment.num_agents
# Initial agent locations, to calculate the expert path lengths
self._initial_agent_locations = [
self.environment.get_agent_location(i)
for i in range(self.environment.num_agents)
]
self._initial_agent_keys = [
self.environment.get_key(ag_loc) for ag_loc in self._initial_agent_locations
]
goal_object = self.environment.get_object_by_id(self.object_id, agent_id=0)
self.object_location = dict(
x=round(goal_object["position"]["x"], 2),
z=round(goal_object["position"]["z"], 2),
)
@staticmethod
def location_l2_dist(loc1, loc2):
return math.sqrt((loc1["x"] - loc2["x"]) ** 2 + (loc1["z"] - loc2["z"]) ** 2)
def info(self):
final_agent_distance_from_target = [
self.location_l2_dist(
self.object_location, self.environment.get_agent_location(i)
)
for i in range(self.environment.num_agents)
]
_dist_cache = {}
def agent_to_dist_to_target(agent_id, target_key):
key = (agent_id, target_key)
if key not in _dist_cache:
_dist_cache[key] = self.environment.shortest_path_length(
source_state_key=self._initial_agent_keys[agent_id],
goal_state_key=target_key,
)
return _dist_cache[key]
best_path_lengths = None
best_path_length = None
for good_target_group in self.target_key_groups:
for ordered_target_group in itertools.permutations(
good_target_group, len(good_target_group)
):
path_lengths = tuple(
agent_to_dist_to_target(agent_id, target_key)
for agent_id, target_key in enumerate(ordered_target_group)
)
path_length = sum(path_lengths)
if best_path_lengths is None or best_path_length > path_length:
                    best_path_length = path_length
best_path_lengths = path_lengths
initial_agent_distance_from_target = [
self.location_l2_dist(self.object_location, ag_loc)
for ag_loc in self._initial_agent_locations
]
return {
**super(JointNavigationEpisode, self).info(),
"accuracy": 1 * self._item_successfully_picked_up,
"pickupable_but_not_picked": self._pickupable_but_not_picked,
"picked_but_not_pickupable": self._picked_but_not_pickupable,
"picked_but_not_pickupable_distance": self._picked_but_not_pickupable_distance,
"picked_but_not_pickupable_visibility": self._picked_but_not_pickupable_visibility,
"reward": self.total_reward,
"first_view_of_target": self._first_view_of_target,
"final_distance_between_agents": self.manhattan_dists_between_agents(),
"best_path_to_target_length": best_path_length,
"path_to_target_lengths": best_path_lengths,
"initial_agent_distance_from_target": initial_agent_distance_from_target,
"final_agent_distance_from_target": final_agent_distance_from_target,
"object_location": self.object_location,
"scene_name": self.environment.scene_name,
}
def multi_step(self, actions_as_ints: Tuple[int, ...]) -> List[Dict[str, Any]]:
step_results = super(FurnLiftNApartStateLoggingEpisode, self).multi_step(
actions_as_ints=actions_as_ints
)
# Extra logging of step number at which an agent first saw the target
visibility = self.goal_visibility()
for i in range(self.environment.num_agents):
if visibility[i]:
self._first_view_of_target[i] = min(
self._first_view_of_target[i], self.num_steps_taken_in_episode()
)
return step_results
class FurnLiftGridStateEpisode(FurnLiftNApartStateEpisode):
def __init__(
self,
env: AI2ThorEnvironmentWithGraph,
task_data: Dict[str, Any],
max_steps: int,
**kwargs
):
super().__init__(env=env, task_data=task_data, max_steps=max_steps, **kwargs)
self.object_points_set = kwargs["object_points_set"]
def states_for_agents(self) -> List[Dict[str, Any]]:
# TODO: Getting the current occupancy matrix like this on every iteration is,
# TODO: in principle, an expensive operation as it requires doing some thor actions.
# TODO: We could track state changes without doing this but this requires some annoying bookkeeping.
# Reachable, unreachable and agent locations are marked
matrix, point_to_element_map = self.environment.get_current_occupancy_matrix(
padding=0.5, use_initially_reachable_points_matrix=True
)
# Mark goal object locations
nskipped = 0
for point_tuple in self.object_points_set:
if point_tuple not in point_to_element_map:
nskipped += 1
continue
row, col = point_to_element_map[point_tuple]
matrix[row, col] = constants.GOAL_OBJ_SYM
if nskipped == len(self.object_points_set):
raise RuntimeError(
"Skipped all object points in scene {}.".format(
self.environment.scene_name
)
)
elif nskipped > 10:
warnings.warn(
"Skipping many object points ({}) in scene {}.".format(
nskipped, self.environment.scene_name
)
)
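        # Each agent observes an egocentric crop of the occupancy matrix; the crop extent
        # (in grid cells) is derived from the visibility distance divided by the grid size.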
states = []
for agent_id in range(self.environment.num_agents):
matrix_ego_per_agent = self.environment.current_matrix_frame(
agent_id,
matrix,
point_to_element_map,
int(self.environment.visibility_distance // self.environment.grid_size),
int(self.environment.visibility_distance // self.environment.grid_size),
)
last_action = (
None if self._last_actions is None else self._last_actions[agent_id]
)
last_action_success = (
None
if self._last_actions_success is None
else self._last_actions_success[agent_id]
)
states.append(
{
"frame": matrix_ego_per_agent,
"last_action": last_action,
"last_action_success": last_action_success,
}
)
return states
| cordial-sync-master | rl_multi_agent/furnlift_episodes.py |
import glob
import inspect
import json
import math
import os
import time
import warnings
from collections import defaultdict
from typing import List, Dict, Any
import matplotlib as mpl
import numpy as np
import torch
from matplotlib import pyplot as plt
from constants import ABS_PATH_TO_ANALYSIS_RESULTS_DIR, ABS_PATH_TO_DATA_DIR
from rl_ai2thor.ai2thor_episodes import MultiAgentAI2ThorEpisode
from rl_base import A3CAgent
from rl_multi_agent import MultiAgent
from utils.visualization_utils import get_agent_map_data, visualize_agent_path
def step_results_to_data_to_save_for_furnmove_experiment(
episode: MultiAgentAI2ThorEpisode, step_results: List[List[Dict]]
):
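    """Aggregate raw per-step results from a FurnMove evaluation episode into a summary
    dictionary: episode length, navigation metrics, per-agent action records, joint
    action-count matrices, and the total reward."""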
if len(step_results) == 0:
return {}
episode_info = episode.info()
nagents = len(step_results[0])
info = {
"ep_length": len(step_results) * nagents,
"final_distance": episode_info["navigation/final_distance"],
"reached_target": episode_info["navigation/reached_target"],
"spl_manhattan": episode_info["navigation/spl_manhattan"],
"initial_manhattan_steps": episode_info["navigation/initial_manhattan_steps"],
}
avail_actions = episode.available_actions
action_taken_matrix = np.zeros((len(avail_actions),) * nagents)
action_taken_success_matrix = np.zeros((len(avail_actions),) * nagents)
agent_action_info = [defaultdict(lambda: []) for _ in range(nagents)]
total_reward = 0
for srs in step_results:
action_inds = []
success_so_far = None
for agent_id, sr in enumerate(srs):
agent_info = agent_action_info[agent_id]
total_reward += sr["reward"]
action_ind = sr["action"]
action = avail_actions[sr["action"]]
action_success = sr["action_success"]
action_inds.append(action_ind)
agent_info["action_ind"].append(action_ind)
agent_info["action"].append(action)
agent_info["action_success"].append(action_success)
if "extra_before_info" in sr:
agent_info["dresser_visible"].append(
sr["extra_before_info"]["dresser_visible"]
)
agent_info["tv_visible"].append(sr["extra_before_info"]["tv_visible"])
if success_so_far is None:
success_so_far = action_success
elif success_so_far != action_success:
success_so_far = math.nan
action_inds = tuple(action_inds)
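        # success_so_far is NaN when the agents' individual action successes disagreed;
        # this is only unexpected (and warned about) when no agent took a Pass action.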
if success_so_far is math.nan:
if all(
"Pass" != agent_action_info[i]["action"][-1] for i in range(nagents)
):
warnings.warn("One agent was successful while the other was not!")
elif success_so_far:
action_taken_success_matrix[action_inds] += 1
action_taken_matrix[action_inds] += 1
info["step_results"] = step_results
info["reward"] = total_reward
info["agent_action_info"] = [dict(info) for info in agent_action_info]
info["action_taken_matrix"] = action_taken_matrix.tolist()
info["action_taken_success_matrix"] = action_taken_success_matrix.tolist()
return info
class SaveFurnMoveMixin(object):
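    """Mixin that pulls fixed evaluation episodes from a queue and writes a JSON summary
    of each completed FurnMove episode to disk."""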
def simple_name(self):
raise NotImplementedError()
def create_episode_summary(
self,
agent: A3CAgent,
additional_metrics: Dict[str, float],
step_results: List[List[Dict]],
) -> Any:
data_to_save = step_results_to_data_to_save_for_furnmove_experiment(
episode=agent.episode, step_results=step_results
)
data_to_save["episode_init_data"] = self.init_train_agent.episode_init_data
if len(agent.eval_results) != 0:
for i in range(agent.environment.num_agents):
for k in ["talk_probs", "reply_probs"]:
data_to_save["a{}_{}".format(i, k)] = []
for er in agent.eval_results:
if k in er:
data_to_save["a{}_{}".format(i, k)].append(
er[k][i].cpu().detach().view(-1).numpy().tolist()
)
if "randomization_probs" in agent.eval_results[0]:
data_to_save["randomization_probs"] = []
for er in agent.eval_results:
data_to_save["randomization_probs"].append(
torch.exp(er["randomization_log_probs"])
.cpu()
.detach()
.view(-1)
.numpy()
.tolist()
)
return {**data_to_save, **additional_metrics}
def data_save_dir(self) -> str:
return os.path.join(ABS_PATH_TO_DATA_DIR, "furnmove_evaluations__{}/{}").format(
self.episode_init_queue_file_name.split("__")[-1].replace(".json", ""),
self.simple_name(),
)
def save_episode_summary(self, data_to_save: Any):
self.total_saved += 1
scene = data_to_save["episode_init_data"]["scene"]
rep = data_to_save["episode_init_data"]["replication"]
print(
"Saving data corresponding to scene {} and replication {}. {}/{} completed.".format(
scene, rep, self.total_saved, self.total_to_save
),
flush=True,
)
write_path = os.path.join(
self.data_save_dir(), "{}__{}.json".format(scene, rep)
)
with open(write_path, "w") as f:
json.dump(data_to_save, f)
def create_episode_init_queue(self, mp_module) -> torch.multiprocessing.Queue:
q = mp_module.Queue()
self.total_saved = 0
with open(
os.path.join(ABS_PATH_TO_DATA_DIR, "{}").format(
self.episode_init_queue_file_name
),
"r",
) as f:
scene_to_rep_to_ep_data = json.load(f)
keys_remaining = {
(scene, rep)
for scene in scene_to_rep_to_ep_data
for rep in scene_to_rep_to_ep_data[scene]
}
os.makedirs(self.data_save_dir(), exist_ok=True)
for p in glob.glob(os.path.join(self.data_save_dir(), "*.json")):
file_name = os.path.basename(p)
keys_remaining.remove(tuple(file_name.replace(".json", "").split("__")))
k = 0
for scene, rep in keys_remaining:
data = scene_to_rep_to_ep_data[scene][rep]
data["scene"] = scene
data["replication"] = rep
q.put(data)
k += 1
print("{} episodes added to queue".format(k))
self.total_to_save = k
return q
def stopping_criteria_reached(self):
return self.total_saved >= self.total_to_save
def step_results_to_data_to_save_for_furnlift_experiment(
episode: MultiAgentAI2ThorEpisode, step_results: List[List[Dict]]
):
if len(step_results) == 0:
return {}
episode_info = episode.info()
nagents = len(step_results[0])
info = {
"ep_length": len(step_results) * nagents,
"picked_but_not_pickupable_distance": episode_info[
"picked_but_not_pickupable_distance"
],
"picked_but_not_pickupable": episode_info["picked_but_not_pickupable"],
"picked_but_not_pickupable_visibility": episode_info[
"picked_but_not_pickupable_visibility"
],
"pickupable_but_not_picked": episode_info["pickupable_but_not_picked"],
"accuracy": episode_info["accuracy"],
"final_distance": episode_info["final_manhattan_distance_from_target"],
"initial_manhattan_steps": episode_info["initial_manhattan_steps"],
"spl_manhattan": episode_info["spl_manhattan"],
}
avail_actions = episode.available_actions
action_taken_matrix = np.zeros((len(avail_actions),) * nagents)
agent_action_info = [defaultdict(lambda: []) for _ in range(nagents)]
total_reward = 0
for srs in step_results:
action_inds = []
success_so_far = None
for agent_id, sr in enumerate(srs):
agent_info = agent_action_info[agent_id]
total_reward += sr["reward"]
action_ind = sr["action"]
action = avail_actions[sr["action"]]
action_success = sr["action_success"]
action_inds.append(action_ind)
agent_info["action_ind"].append(action_ind)
agent_info["action"].append(action)
agent_info["action_success"].append(action_success)
if success_so_far is None:
success_so_far = action_success
elif success_so_far != action_success:
success_so_far = math.nan
action_inds = tuple(action_inds)
action_taken_matrix[action_inds] += 1
info["step_results"] = step_results
info["reward"] = total_reward
info["agent_action_info"] = [dict(info) for info in agent_action_info]
info["action_taken_matrix"] = action_taken_matrix.tolist()
return info
class SaveFurnLiftMixin(object):
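    """FurnLift counterpart of SaveFurnMoveMixin: pulls fixed evaluation episodes from a
    queue and writes a JSON summary of each completed episode to disk."""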
def simple_name(self):
raise NotImplementedError()
def create_episode_summary(
self,
agent: A3CAgent,
additional_metrics: Dict[str, float],
step_results: List[List[Dict]],
) -> Any:
data_to_save = step_results_to_data_to_save_for_furnlift_experiment(
episode=agent.episode, step_results=step_results
)
data_to_save["episode_init_data"] = self.init_train_agent.episode_init_data
if len(agent.eval_results) != 0:
for i in range(agent.environment.num_agents):
for k in ["talk_probs", "reply_probs"]:
data_to_save["a{}_{}".format(i, k)] = []
for er in agent.eval_results:
if k in er:
data_to_save["a{}_{}".format(i, k)].append(
er[k][i].cpu().detach().view(-1).numpy().tolist()
)
if "randomization_probs" in agent.eval_results[0]:
data_to_save["randomization_probs"] = []
for er in agent.eval_results:
data_to_save["randomization_probs"].append(
torch.exp(er["randomization_log_probs"])
.cpu()
.detach()
.view(-1)
.numpy()
.tolist()
)
return {**data_to_save, **additional_metrics}
def data_save_dir(self) -> str:
unique_dir = os.path.basename(inspect.getfile(type(self))).replace(
"_config.py", ""
)
return os.path.join(
ABS_PATH_TO_DATA_DIR, "furnlift_evaluations__{}/{}__{}"
).format(
self.episode_init_queue_file_name.split("__")[-1].replace(".json", ""),
self.simple_name(),
unique_dir,
)
def save_episode_summary(self, data_to_save: Any):
self.total_saved += 1
scene = data_to_save["episode_init_data"]["scene"]
rep = data_to_save["episode_init_data"]["replication"]
print(
(
"Saving data corresponding to scene {} and replication {}."
" {}/{} completed."
).format(scene, rep, self.total_saved, self.total_to_save),
flush=True,
)
write_path = os.path.join(
self.data_save_dir(), "{}__{}.json".format(scene, rep)
)
with open(write_path, "w") as f:
json.dump(data_to_save, f)
def create_episode_init_queue(self, mp_module) -> torch.multiprocessing.Queue:
q = mp_module.Queue()
self.total_saved = 0
with open(
os.path.join(ABS_PATH_TO_DATA_DIR, "{}").format(
self.episode_init_queue_file_name
),
"r",
) as f:
scene_to_rep_to_ep_data = json.load(f)
keys_remaining = {
(scene, rep)
for scene in scene_to_rep_to_ep_data
for rep in scene_to_rep_to_ep_data[scene]
}
os.makedirs(self.data_save_dir(), exist_ok=True)
for p in glob.glob(os.path.join(self.data_save_dir(), "*.json")):
file_name = os.path.basename(p)
keys_remaining.remove(tuple(file_name.replace(".json", "").split("__")))
k = 0
for scene, rep in keys_remaining:
data = scene_to_rep_to_ep_data[scene][rep]
data["scene"] = scene
data["replication"] = rep
q.put(data)
k += 1
print("{} episodes added to queue".format(k))
self.total_to_save = k
return q
def stopping_criteria_reached(self):
return self.total_saved >= self.total_to_save
def save_agents_path_without_frame_png(
agent: MultiAgent, k=0, sampling_rate=5, **kwargs
):
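    # Maps from a (90-degree multiple) rotation to a small x/z offset, used to draw
    # heading arrows for agents and for the lifted object.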
angle_to_dx = {0: 0.0, 90: 0.1, 180: 0, 270: -0.1}
angle_to_dz = {0: 0.1, 90: 0.0, 180: -0.1, 270: 0.0}
available_actions = agent.episode.available_actions
step_results = agent.step_results
plt.figure(figsize=(14, 14))
plt.tight_layout()
plt.axis("equal")
colors = mpl.cm.rainbow(np.linspace(0, 1, agent.episode.environment.num_agents))
agent_marker_type = "o"
agent_marker_size = 6
object_marker_type = "*"
object_marker_size = 10
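    # Arrow lengths below are proportional to 1 / (length_factor + 0.05), so a smaller
    # factor draws a longer arrow.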
short_arrow_length_factor = 1.8
long_arrow_length_factor = 1.2
longer_arrow_length_factor = 0.8
object_color = "cyan"
object_before_positions = [
sr[0]["object_location"][agent.episode.object_id]["before_location"]
for sr in step_results
]
object_after_positions = [
sr[0]["object_location"][agent.episode.object_id]["after_location"]
for sr in step_results
]
plt.plot(
object_before_positions[0]["x"],
object_before_positions[0]["z"],
color="blue",
marker="x",
markersize=12,
)
plt.arrow(
object_before_positions[0]["x"],
object_before_positions[0]["z"],
angle_to_dx[round(object_before_positions[0]["rotation"] / 90.0) * 90] / 0.5,
angle_to_dz[round(object_before_positions[0]["rotation"] / 90.0) * 90] / 0.5,
color="blue",
head_width=0.08,
)
if hasattr(agent.episode, "object_points_set"):
object_points_set = agent.episode.object_points_set()
object_points_set_x = [point_tuple[0] for point_tuple in object_points_set]
object_points_set_z = [point_tuple[1] for point_tuple in object_points_set]
plt.plot(
object_points_set_x,
object_points_set_z,
color="blue",
marker="x",
markersize=6,
linestyle="None",
)
for agent_id in range(agent.environment.num_agents):
agent_color = colors[agent_id]
before_positions = [sr[agent_id]["before_location"] for sr in step_results]
after_positions = [sr[agent_id]["after_location"] for sr in step_results]
plt.plot(
before_positions[0]["x"],
before_positions[0]["z"],
color=agent_color,
marker="x",
markersize=10,
)
plt.arrow(
before_positions[0]["x"],
before_positions[0]["z"],
            angle_to_dx[round(before_positions[0]["rotation"] / 90.0) * 90]
            / 0.5,
            angle_to_dz[round(before_positions[0]["rotation"] / 90.0) * 90]
            / 0.5,
color=agent_color,
head_width=0.08,
)
for sr_i, sr in enumerate(step_results):
before_position_i = before_positions[sr_i]
after_position_i = after_positions[sr_i]
object_before_position_i = object_before_positions[sr_i]
object_after_position_i = object_after_positions[sr_i]
object_before_angle_i = (
round((object_before_position_i["rotation"] % 360) / 90.0) * 90.0
)
object_after_angle_i = (
round((object_after_position_i["rotation"] % 360) / 90.0) * 90.0
)
before_angle_i = round((before_position_i["rotation"] % 360) / 90.0) * 90.0
after_angle_i = round((after_position_i["rotation"] % 360) / 90.0) * 90.0
action_string = available_actions[sr[agent_id]["action"]]
agent_arrow = {
"color": ["black"],
"length_factor": [short_arrow_length_factor],
"angle": [after_angle_i],
}
object_arrow = {
"color": ["black"],
"length_factor": [short_arrow_length_factor],
"angle": [object_after_angle_i],
}
if not sr[agent_id]["action_success"]:
# No marks for unsuccessful actions
continue
if action_string == "MoveAhead":
# No extra marks for individual move aheads
pass
elif action_string in ["RotateLeft", "RotateRight"]:
                # overwrite with a long arrow to mark rotations
agent_arrow["color"].append("yellow")
agent_arrow["length_factor"].append(long_arrow_length_factor)
agent_arrow["angle"].append(after_angle_i)
elif action_string in ["RotateLiftedRight"]:
                # overwrite with a long arrow to mark rotations
object_arrow["color"].append("yellow")
object_arrow["length_factor"].append(long_arrow_length_factor)
object_arrow["angle"].append(object_after_angle_i)
elif action_string in [
"MoveLiftedObjectAhead",
"MoveLiftedObjectRight",
"MoveLiftedObjectBack",
"MoveLiftedObjectLeft",
]:
object_arrow["color"].append("green")
object_arrow["length_factor"].append(longer_arrow_length_factor)
action_to_rot_ind_offset = {
"MoveLiftedObjectAhead": 0,
"MoveLiftedObjectRight": 1,
"MoveLiftedObjectBack": 2,
"MoveLiftedObjectLeft": 3,
}
object_rot_ind = round((object_after_angle_i % 360) / 90.0)
adjusted_rot_ind = (
action_to_rot_ind_offset[action_string] + object_rot_ind
) % 4
adjusted_rot = adjusted_rot_ind * 90
object_arrow["angle"].append(adjusted_rot)
elif action_string in [
"MoveAgentsAheadWithObject",
"MoveAgentsRightWithObject",
"MoveAgentsBackWithObject",
"MoveAgentsLeftWithObject",
]:
agent_arrow["color"].append(agent_color)
agent_arrow["length_factor"].append(longer_arrow_length_factor)
action_to_rot_ind_offset = {
"MoveAgentsAheadWithObject": 0,
"MoveAgentsRightWithObject": 1,
"MoveAgentsBackWithObject": 2,
"MoveAgentsLeftWithObject": 3,
}
agent_rot_ind = round((after_angle_i % 360) / 90.0)
adjusted_rot_ind = (
action_to_rot_ind_offset[action_string] + agent_rot_ind
) % 4
adjusted_rot = adjusted_rot_ind * 90
agent_arrow["angle"].append(adjusted_rot)
elif action_string in [
"MoveAgentsNorthWithObject",
"MoveAgentsEastWithObject",
"MoveAgentsSouthWithObject",
"MoveAgentsWestWithObject",
]:
agent_arrow["color"].append(agent_color)
agent_arrow["length_factor"].append(longer_arrow_length_factor)
action_to_rot = {
"MoveAgentsNorthWithObject": 0,
"MoveAgentsEastWithObject": 90,
"MoveAgentsSouthWithObject": 180,
"MoveAgentsWestWithObject": 270,
}
agent_arrow["angle"].append(action_to_rot[action_string])
elif action_string == "Pass":
continue
plt.plot(
after_position_i["x"],
after_position_i["z"],
color=agent_color,
marker=agent_marker_type,
markersize=agent_marker_size,
alpha=0.2,
)
plt.plot(
object_after_position_i["x"],
object_after_position_i["z"],
color=object_color,
marker=object_marker_type,
markersize=object_marker_size,
alpha=0.2,
)
assert agent_arrow.keys() == object_arrow.keys()
arrow_keys = list(agent_arrow.keys())
assert all(
len(agent_arrow[key]) == len(agent_arrow[arrow_keys[0]])
for key in arrow_keys
)
assert all(
len(object_arrow[key]) == len(object_arrow[arrow_keys[0]])
for key in arrow_keys
)
for arrow_i in range(len(agent_arrow[arrow_keys[0]]))[::-1]:
angle = agent_arrow["angle"][arrow_i]
color = agent_arrow["color"][arrow_i]
length_factor = agent_arrow["length_factor"][arrow_i]
plt.arrow(
after_position_i["x"],
after_position_i["z"],
angle_to_dx[angle] / (length_factor + 0.05),
angle_to_dz[angle] / (length_factor + 0.05),
color=color,
alpha=0.4,
)
if sr_i % sampling_rate == 0:
plt.annotate(
str(sr_i),
(
after_position_i["x"]
+ angle_to_dx[angle] * 2 / (length_factor + 0.05),
after_position_i["z"]
+ angle_to_dz[angle] * 2 / (length_factor + 0.05),
),
)
for arrow_i in range(len(object_arrow[arrow_keys[0]]))[::-1]:
angle = object_arrow["angle"][arrow_i]
color = object_arrow["color"][arrow_i]
length_factor = object_arrow["length_factor"][arrow_i]
plt.arrow(
object_after_position_i["x"],
object_after_position_i["z"],
angle_to_dx[angle] / (length_factor + 0.05),
angle_to_dz[angle] / (length_factor + 0.05),
color=color,
alpha=0.4,
)
if sr_i % sampling_rate == 0:
plt.annotate(
str(sr_i),
(
object_after_position_i["x"]
+ angle_to_dx[angle] * 2 / (length_factor + 0.05),
object_after_position_i["z"]
+ angle_to_dz[angle] * 2 / (length_factor + 0.05),
),
)
plt.arrow(
object_after_positions[-1]["x"],
object_after_positions[-1]["z"],
angle_to_dx[round(object_after_positions[-1]["rotation"] / 90.0) * 90] / 0.5,
angle_to_dz[round(object_after_positions[-1]["rotation"] / 90.0) * 90] / 0.5,
color="blue",
head_width=0.08,
linestyle=":",
)
# Mark all reachable points
for p in agent.episode.environment.initially_reachable_points:
plt.plot(p["x"], p["z"], "k.", markersize=5)
# Mark all unreachable points
for (px, pz) in list(agent.episode.environment.get_initially_unreachable_points()):
plt.plot(px, pz, "k,", markersize=5)
plt.savefig(
os.path.join(
ABS_PATH_TO_ANALYSIS_RESULTS_DIR, "episode_run_paths/{}_{}.jpg"
).format(kwargs["args"].task, k),
bbox_inches="tight",
)
plt.close("all")
time.sleep(0.1)
def save_agents_path_png(
agent: MultiAgent, k=0, start_resolution: int = 84, save_resolution: int = 1000,
):
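    # Temporarily raise the render resolution so the top-down map is sharp, overlay every
    # agent's trajectory (marking joint pickup attempts), then restore the original resolution.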
agent.environment.step(
{
"action": "ChangeResolution",
"x": save_resolution,
"y": save_resolution,
"agentId": 0,
}
)
time.sleep(2)
step_results = agent.step_results
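    # For every step, record a joint mark color: white if all agents attempted Pickup and
    # all succeeded, grey if all attempted Pickup but at least one failed, None otherwise.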
joint_position_mark_colors = []
pickup_index = agent.episode.available_actions.index("Pickup")
for sr in step_results:
all_picked = all(
sr[i]["action"] == pickup_index for i in range(agent.environment.num_agents)
)
all_success = all(
sr[i]["action_success"] for i in range(agent.environment.num_agents)
)
if all_picked and all_success:
joint_position_mark_colors.append("white")
elif all_picked:
joint_position_mark_colors.append("grey")
else:
joint_position_mark_colors.append(None)
map_data = get_agent_map_data(agent.environment)
frame = map_data["frame"]
for i in range(agent.environment.num_agents):
positions = [sr[i]["before_location"] for sr in step_results]
positions.append(step_results[-1][i]["after_location"])
position_mark_colors = []
for j, sr in enumerate(step_results):
if joint_position_mark_colors[j] is not None:
position_mark_colors.append(joint_position_mark_colors[j])
elif sr[i]["action"] == agent.episode.available_actions.index("Pickup"):
position_mark_colors.append("black")
else:
position_mark_colors.append(None)
position_mark_colors.append(None)
frame = visualize_agent_path(
positions,
frame,
map_data["pos_translator"],
color_pair_ind=i,
position_mark_colors=position_mark_colors,
only_show_last_visibility_cone=True,
)
agent.environment.step(
{
"action": "ChangeResolution",
"x": start_resolution,
"y": start_resolution,
"agentId": 0,
}
)
time.sleep(2)
def vector3s_near_equal(p0: Dict[str, float], p1: Dict[str, float], ep: float = 1e-3):
return abs(p0["x"] - p1["x"]) + abs(p0["y"] - p1["y"]) + abs(p0["z"] - p1["z"]) < ep
def at_same_location(m0, m1, ignore_camera: bool = False):
the_same = vector3s_near_equal(
m0 if "position" not in m0 else m0["position"],
m1 if "position" not in m1 else m1["position"],
) and vector3s_near_equal(m0["rotation"], m1["rotation"])
if "cameraHorizon" in m0:
the_same = the_same and (
ignore_camera
or abs(
(m0.get("horizon") if "horizon" in m0 else m0["cameraHorizon"])
- (m1.get("horizon") if "horizon" in m1 else m1["cameraHorizon"])
)
< 0.001
)
return the_same
| cordial-sync-master | rl_multi_agent/furnmove_utils.py |
from .agents import *
| cordial-sync-master | rl_multi_agent/__init__.py |
import hashlib
import itertools
import math
import os
import queue
import random
import warnings
from typing import Dict, List, Optional, Callable, Tuple
import networkx as nx
import numpy as np
import pandas as pd
import torch
from networkx import find_cliques
from torch import multiprocessing as mp
from constants import TELEVISION_ROTATION_TO_OCCUPATIONS
from rl_ai2thor.ai2thor_environment import AI2ThorEnvironmentWithGraph
from rl_ai2thor.ai2thor_utils import manhattan_dists_between_positions
from rl_multi_agent import MultiAgent
from rl_multi_agent.furnlift_episodes import JointNavigationEpisode
from rl_multi_agent.furnmove_utils import at_same_location
from rl_multi_agent.multi_agent_utils import TrainingCompleteException
def create_or_append(dict: Dict[str, List], key, value):
if key not in dict:
dict[key] = [value]
else:
dict[key].append(value)
def save_talk_reply_data_frame(
agent, save_path, index: Optional[int], use_hash: bool = False, prefix: str = ""
):
num_agents = agent.environment.num_agents
eval_results = agent.eval_results
object_id = agent.episode.object_id
o = agent.environment.get_object_by_id(object_id, agent_id=0)
o_pos = {**o["position"], "rotation": o["rotation"]["y"]}
data = {}
for i in range(len(eval_results)):
er = eval_results[i]
for k in er:
if "cpu" in dir(er[k]):
er[k] = er[k].cpu()
sr = er["step_result"]
for agent_id in range(num_agents):
agent_key = "agent_{}_".format(agent_id)
for probs_key in ["talk_probs", "reply_probs"]:
probs = er[probs_key][agent_id, :].detach().numpy()
for j in range(len(probs)):
create_or_append(
data, agent_key + probs_key + "_" + str(j), probs[j]
)
sr_i = sr[agent_id]
for key in ["goal_visible", "pickup_action_taken", "action_success"]:
create_or_append(data, agent_key + key, sr_i[key])
create_or_append(
data,
agent_key + "action",
agent.episode.available_actions[sr_i["action"]],
)
before_loc = sr_i["before_location"]
for key in before_loc:
create_or_append(data, agent_key + key, before_loc[key])
create_or_append(
data,
agent_key + "l2_dist_to_object",
math.sqrt(
(o_pos["x"] - before_loc["x"]) ** 2
+ (o_pos["z"] - before_loc["z"]) ** 2
),
)
create_or_append(
data,
agent_key + "manhat_dist_to_object",
abs(o_pos["x"] - before_loc["x"]) + abs(o_pos["z"] - before_loc["z"]),
)
for key in o_pos:
create_or_append(data, "object_" + key, o_pos[key])
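        # Manhattan distance between the two agents in grid steps; the factor of 4
        # corresponds to the 0.25m grid used by the episode samplers. Note that the
        # "agent_0"/"agent_1" keys assume exactly two agents.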
mutual_agent_distance = int(
4
* (
abs(data["agent_0_x"][-1] - data["agent_1_x"][-1])
+ abs(data["agent_0_z"][-1] - data["agent_1_z"][-1])
)
)
create_or_append(data, "mutual_agent_distance", mutual_agent_distance)
df = pd.DataFrame(
data={**data, "scene_name": [agent.environment.scene_name] * len(eval_results)}
)
for df_k in df.keys():
if df[df_k].dtype in [float, np.float32, np.float64]:
df[df_k] = np.round(df[df_k], 4)
if use_hash:
file_name = (
agent.environment.scene_name
+ "_"
+ hashlib.md5(pd.util.hash_pandas_object(df, index=True).values).hexdigest()
+ ".tsv"
)
else:
file_name = "{}_{}_talk_reply_data.tsv".format(index, prefix)
df.to_csv(
os.path.join(save_path, file_name),
sep="\t",
# float_format="%.4f",
)
def _create_environment(
num_agents,
env_args,
visible_agents: bool,
render_depth_image: bool,
headless: bool = False,
**environment_args,
) -> AI2ThorEnvironmentWithGraph:
env = AI2ThorEnvironmentWithGraph(
docker_enabled=env_args.docker_enabled,
x_display=env_args.x_display,
local_thor_build=env_args.local_thor_build,
num_agents=num_agents,
restrict_to_initially_reachable_points=True,
visible_agents=visible_agents,
render_depth_image=render_depth_image,
headless=headless,
**environment_args,
)
return env
class FurnLiftEpisodeSamplers(object):
def __init__(
self,
scenes: List[str],
num_agents: int,
object_type: str,
env_args=None,
max_episode_length: int = 500,
episode_class: Callable = JointNavigationEpisode,
player_screen_height=224,
player_screen_width=224,
save_talk_reply_probs_path: Optional[str] = None,
min_dist_between_agents_to_pickup: int = 0,
max_ep_using_expert_actions: int = 10000,
visible_agents: bool = True,
max_visible_positions_push: int = 8,
min_max_visible_positions_to_consider: Tuple[int, int] = (8, 16),
include_depth_frame: bool = False,
allow_agents_to_intersect: bool = False,
):
self.visible_agents = visible_agents
self.max_ep_using_expert_actions = max_ep_using_expert_actions
self.min_dist_between_agents_to_pickup = min_dist_between_agents_to_pickup
self.save_talk_reply_probs_path = save_talk_reply_probs_path
self.player_screen_height = player_screen_height
self.player_screen_width = player_screen_width
self.episode_class = episode_class
self.max_episode_length = max_episode_length
self.env_args = env_args
self.num_agents = num_agents
self.scenes = scenes
self.object_type = object_type
self.max_visible_positions_push = max_visible_positions_push
self.min_max_visible_positions_to_consider = (
min_max_visible_positions_to_consider
)
self.include_depth_frame = include_depth_frame
self.allow_agents_to_intersect = allow_agents_to_intersect
self.grid_size = 0.25
self._current_train_episode = 0
self._internal_episodes = 0
@property
def current_train_episode(self):
return self._current_train_episode
@current_train_episode.setter
def current_train_episode(self, value):
self._current_train_episode = value
def _contain_n_positions_at_least_dist_apart(self, positions, n, min_dist):
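        """Return True if some choice of `n` of the given positions is pairwise at least
        `min_dist` apart (Manhattan distance, measured in grid steps)."""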
for sub_positions in itertools.combinations(positions, n):
if all(
x >= min_dist
for x in sum(
manhattan_dists_between_positions(sub_positions, self.grid_size),
[],
)
):
return True
return False
def __call__(
self,
agent: MultiAgent,
agent_location_seed=None,
env: Optional[AI2ThorEnvironmentWithGraph] = None,
episode_init_queue: Optional[mp.Queue] = None,
) -> None:
self._internal_episodes += 1
if env is None:
if agent.environment is not None:
env = agent.environment
else:
env = _create_environment(
num_agents=self.num_agents,
env_args=self.env_args,
visible_agents=self.visible_agents,
render_depth_image=self.include_depth_frame,
allow_agents_to_intersect=self.allow_agents_to_intersect,
)
env.start(
"FloorPlan1_physics",
move_mag=self.grid_size,
quality="Very Low",
player_screen_height=self.player_screen_height,
player_screen_width=self.player_screen_width,
)
if (
self.max_ep_using_expert_actions != 0
and self.current_train_episode <= self.max_ep_using_expert_actions
):
agent.take_expert_action_prob = 0.9 * (
1.0 - self.current_train_episode / self.max_ep_using_expert_actions
)
else:
agent.take_expert_action_prob = 0
if episode_init_queue is not None:
try:
self.episode_init_data = episode_init_queue.get(timeout=1)
except queue.Empty:
raise TrainingCompleteException("No more data in episode init queue.")
scene = self.episode_init_data["scene"]
env.reset(scene)
assert self.object_type == "Television"
env.step(
{
"action": "DisableAllObjectsOfType",
"objectId": self.object_type,
"agentId": 0,
}
)
for agent_id, agent_location in enumerate(
self.episode_init_data["agent_locations"]
):
env.teleport_agent_to(
**agent_location,
agent_id=agent_id,
only_initially_reachable=False,
force_action=True,
)
assert env.last_event.metadata["lastActionSuccess"]
object_location = self.episode_init_data["object_location"]
env.step(
{
"action": "CreateObjectAtLocation",
"objectType": self.object_type,
**object_location,
"position": {
"x": object_location["x"],
"y": object_location["y"],
"z": object_location["z"],
},
"rotation": {"x": 0, "y": object_location["rotation"], "z": 0},
"agentId": 0,
"forceAction": True,
}
)
assert env.last_event.metadata["lastActionSuccess"]
env.refresh_initially_reachable()
objects_of_type = env.all_objects_with_properties(
{"objectType": self.object_type}, agent_id=0
)
if len(objects_of_type) != 1:
print("len(objects_of_type): {}".format(len(objects_of_type)))
raise (Exception("len(objects_of_type) != 1"))
object = objects_of_type[0]
object_id = object["objectId"]
obj_rot = int(object["rotation"]["y"])
object_points_set = set(
(
round(object["position"]["x"] + t[0], 2),
round(object["position"]["z"] + t[1], 2),
)
for t in TELEVISION_ROTATION_TO_OCCUPATIONS[obj_rot]
)
# Find agent metadata from where the target is visible
env.step(
{
"action": "GetPositionsObjectVisibleFrom",
"objectId": object_id,
"agentId": 0,
}
)
possible_targets = env.last_event.metadata["actionVector3sReturn"]
distances = []
for i, r in enumerate(env.last_event.metadata["actionFloatsReturn"]):
possible_targets[i]["rotation"] = int(r)
possible_targets[i]["horizon"] = 30
distances.append(
env.position_dist(object["position"], possible_targets[i])
)
possible_targets = [
env.get_key(x[1])
for x in sorted(
list(zip(distances, possible_targets)), key=lambda x: x[0]
)
]
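            # Build a graph over candidate viewing positions, connecting positions that are
            # at least the minimum pickup distance apart; every clique of size num_agents
            # is then a valid joint set of final standing positions.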
if self.min_dist_between_agents_to_pickup != 0:
possible_targets_array = np.array([t[:2] for t in possible_targets])
manhat_dist_mat = np.abs(
(
(
possible_targets_array.reshape((-1, 1, 2))
- possible_targets_array.reshape((1, -1, 2))
)
/ self.grid_size
).round()
).sum(2)
sufficiently_distant = (
manhat_dist_mat >= self.min_dist_between_agents_to_pickup
)
g = nx.Graph(sufficiently_distant)
good_cliques = []
for i, clique in enumerate(find_cliques(g)):
if i > 1000 or len(good_cliques) > 40:
break
if len(clique) == env.num_agents:
good_cliques.append(clique)
elif len(clique) > env.num_agents:
good_cliques.extend(
itertools.combinations(clique, env.num_agents)
)
good_cliques = [
[possible_targets[i] for i in gc] for gc in good_cliques
]
else:
assert False, "Old code."
if len(possible_targets) < env.num_agents:
raise Exception(
(
"Using data from episode queue (scene {} and replication {}) "
"but there seem to be no good final positions for the agents?"
).format(scene, self.episode_init_data["replication"])
)
scene_successfully_setup = True
else:
scene = random.choice(self.scenes)
env.reset(scene)
scene_successfully_setup = False
failure_reasons = []
for _ in range(10):
env.step(
{
"action": "DisableAllObjectsOfType",
"objectId": self.object_type,
"agentId": 0,
}
)
for agent_id in range(self.num_agents):
env.randomize_agent_location(
agent_id=agent_id,
seed=agent_location_seed[agent_id]
if agent_location_seed
else None,
partial_position={"horizon": 30},
only_initially_reachable=True,
)
env.step(
{
"action": "RandomlyCreateAndPlaceObjectOnFloor",
"objectType": self.object_type,
"agentId": 0,
}
)
if (
env.last_event.metadata["lastAction"]
!= "RandomlyCreateAndPlaceObjectOnFloor"
or not env.last_event.metadata["lastActionSuccess"]
):
                    failure_reasons.append(
                        "Could not randomize location of {} in scene {}.".format(
                            self.object_type, scene
)
)
continue
env.refresh_initially_reachable()
objects_of_type = env.all_objects_with_properties(
{"objectType": self.object_type}, agent_id=0
)
if len(objects_of_type) != 1:
print("len(objects_of_type): {}".format(len(objects_of_type)))
raise (Exception("len(objects_of_type) != 1"))
object = objects_of_type[0]
object_id = object["objectId"]
obj_rot = int(object["rotation"]["y"])
object_points_set = set(
(
round(object["position"]["x"] + t[0], 2),
round(object["position"]["z"] + t[1], 2),
)
for t in TELEVISION_ROTATION_TO_OCCUPATIONS[obj_rot]
)
for agent_id in range(self.num_agents):
env.randomize_agent_location(
agent_id=agent_id,
seed=agent_location_seed[agent_id]
if agent_location_seed
else None,
partial_position={"horizon": 30},
only_initially_reachable=True,
)
# Find agent metadata from where the target is visible
env.step(
{
"action": "GetPositionsObjectVisibleFrom",
"objectId": object_id,
"agentId": 0,
}
)
possible_targets = env.last_event.metadata["actionVector3sReturn"]
distances = []
for i, r in enumerate(env.last_event.metadata["actionFloatsReturn"]):
possible_targets[i]["rotation"] = int(r)
possible_targets[i]["horizon"] = 30
distances.append(
env.position_dist(object["position"], possible_targets[i])
)
possible_targets = [
env.get_key(x[1])
for x in sorted(
list(zip(distances, possible_targets)), key=lambda x: x[0]
)
]
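                # As in the episode-init-queue branch above, keep only groups of viewing
                # positions that are mutually far enough apart to allow a joint pickup.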
if self.min_dist_between_agents_to_pickup != 0:
possible_targets_array = np.array([t[:2] for t in possible_targets])
manhat_dist_mat = np.abs(
(
(
possible_targets_array.reshape((-1, 1, 2))
- possible_targets_array.reshape((1, -1, 2))
)
/ self.grid_size
).round()
).sum(2)
sufficiently_distant = (
manhat_dist_mat >= self.min_dist_between_agents_to_pickup
)
g = nx.Graph(sufficiently_distant)
good_cliques = []
for i, clique in enumerate(find_cliques(g)):
if i > 1000 or len(good_cliques) > 40:
break
if len(clique) == env.num_agents:
good_cliques.append(clique)
elif len(clique) > env.num_agents:
good_cliques.extend(
itertools.combinations(clique, env.num_agents)
)
if len(good_cliques) == 0:
failure_reasons.append(
"Failed to find a tuple of {} targets all {} steps apart.".format(
env.num_agents, self.min_dist_between_agents_to_pickup
)
)
continue
good_cliques = [
[possible_targets[i] for i in gc] for gc in good_cliques
]
else:
assert False, "Old code."
(
min_considered,
max_considered,
) = self.min_max_visible_positions_to_consider
offset0 = min(
max(len(possible_targets) - min_considered, 0),
self.max_visible_positions_push,
)
# offset1 = min(
# max(len(possible_targets) - max_considered, 0),
# self.max_visible_positions_push,
# )
min_slice = slice(offset0, offset0 + min_considered)
# max_slice = slice(offset1, offset1 + max_considered)
possible_targets = possible_targets[min_slice]
good_cliques = list(
itertools.combinations(possible_targets, self.num_agents)
)
if len(possible_targets) < env.num_agents:
failure_reasons.append(
"The number of positions from which the object was visible was less than the number of agents."
)
continue
scene_successfully_setup = True
break
if not scene_successfully_setup:
warnings.warn(
                (
                    "Failed to randomly initialize objects and agents in scene {} 10 times"
                    + " for the following reasons:"
+ "\n\t* ".join(failure_reasons)
+ "\nTrying a new scene."
).format(env.scene_name)
)
yield from self(agent, env=env)
return
# Task data includes the target object id and one agent state/metadata to navigate to.
good_cliques = {tuple(sorted(gc)) for gc in good_cliques}
task_data = {"goal_obj_id": object_id, "target_key_groups": good_cliques}
agent.episode = self.episode_class(
env,
task_data,
max_steps=self.max_episode_length,
num_agents=self.num_agents,
min_dist_between_agents_to_pickup=self.min_dist_between_agents_to_pickup,
object_points_set=object_points_set,
include_depth_frame=self.include_depth_frame,
)
yield True
if self.save_talk_reply_probs_path is not None:
if torch.cuda.is_available():
save_talk_reply_data_frame(
agent,
self.save_talk_reply_probs_path,
None,
use_hash=True, # self._internal_episodes,
)
else:
save_talk_reply_data_frame(
agent, self.save_talk_reply_probs_path, self._internal_episodes
)
class FurnLiftForViewsEpisodeSampler(object):
def __init__(
self,
scene: str,
num_agents: int,
object_type: str,
agent_start_locations: List,
object_start_location: Dict,
env_args=None,
max_episode_length: int = 500,
episode_class: Callable = JointNavigationEpisode,
player_screen_height=224,
player_screen_width=224,
save_talk_reply_probs_path: Optional[str] = None,
min_dist_between_agents_to_pickup: int = 0,
visible_agents: bool = True,
):
self.scene = scene
self.agent_start_locations = agent_start_locations
self.object_start_location = object_start_location
self.visible_agents = visible_agents
self.min_dist_between_agents_to_pickup = min_dist_between_agents_to_pickup
self.save_talk_reply_probs_path = save_talk_reply_probs_path
self.player_screen_height = player_screen_height
self.player_screen_width = player_screen_width
self.episode_class = episode_class
self.max_episode_length = max_episode_length
self.env_args = env_args
self.num_agents = num_agents
self.object_type = object_type
self.grid_size = 0.25
self._current_train_episode = 0
self._internal_episodes = 0
@property
def current_train_episode(self):
return self._current_train_episode
@current_train_episode.setter
def current_train_episode(self, value):
self._current_train_episode = value
def __call__(
self,
agent: MultiAgent,
agent_location_seed=None,
env: Optional[AI2ThorEnvironmentWithGraph] = None,
) -> None:
self._internal_episodes += 1
agent.take_expert_action_prob = 0
if env is None:
if agent.environment is not None:
env = agent.environment
else:
env = _create_environment(
num_agents=self.num_agents,
env_args=self.env_args,
visible_agents=self.visible_agents,
render_depth_image=False,
)
env.start(
self.scene,
move_mag=self.grid_size,
quality="Very Low",
player_screen_height=self.player_screen_height,
player_screen_width=self.player_screen_width,
)
env.reset(self.scene)
env.step(
{
"action": "DisableAllObjectsOfType",
"objectId": self.object_type,
"agentId": 0,
}
)
object_pos = {
"position": {k: self.object_start_location[k] for k in ["x", "y", "z"]},
"rotation": {"x": 0, "y": self.object_start_location["rotation"], "z": 0},
}
env.teleport_agent_to(
**{
**self.agent_start_locations[0],
"rotation": self.agent_start_locations[0]["rotation"]["y"],
},
force_action=True,
agent_id=0,
)
assert at_same_location(
env.last_event.metadata["agent"], self.agent_start_locations[0]
)
env.teleport_agent_to(
**{
**self.agent_start_locations[1],
"rotation": self.agent_start_locations[1]["rotation"]["y"],
},
force_action=True,
agent_id=1,
)
assert at_same_location(
env.last_event.metadata["agent"], self.agent_start_locations[1]
)
env.step(
{
"action": "CreateObjectAtLocation",
"objectType": self.object_type,
"agentId": 0,
**object_pos,
}
)
assert env.last_event.metadata["lastActionSuccess"]
env.refresh_initially_reachable()
objects_of_type = env.all_objects_with_properties(
{"objectType": self.object_type}, agent_id=0
)
if len(objects_of_type) != 1:
print("len(objects_of_type): {}".format(len(objects_of_type)))
raise (Exception("len(objects_of_type) != 1"))
object = objects_of_type[0]
assert at_same_location(object, object_pos, ignore_camera=True)
object_id = object["objectId"]
obj_rot = int(object["rotation"]["y"])
object_points_set = set(
(
round(object["position"]["x"] + t[0], 2),
round(object["position"]["z"] + t[1], 2),
)
for t in TELEVISION_ROTATION_TO_OCCUPATIONS[obj_rot]
)
# Find agent metadata from where the target is visible
env.step(
{
"action": "GetPositionsObjectVisibleFrom",
"objectId": object_id,
"agentId": 0,
}
)
possible_targets = env.last_event.metadata["actionVector3sReturn"]
distances = []
for i, r in enumerate(env.last_event.metadata["actionFloatsReturn"]):
possible_targets[i]["rotation"] = int(r)
possible_targets[i]["horizon"] = 30
distances.append(env.position_dist(object["position"], possible_targets[i]))
possible_targets = [
x[1]
for x in sorted(list(zip(distances, possible_targets)), key=lambda x: x[0])
]
possible_targets = possible_targets[:24]
if len(possible_targets) < env.num_agents:
raise Exception(
"The number of positions from which the object was visible was less than the number of agents."
)
        # Task data includes the target object id and one agent state/metadata to navigate to.
task_data = {"goal_obj_id": object_id, "targets": possible_targets}
agent.episode = self.episode_class(
env,
task_data,
max_steps=self.max_episode_length,
num_agents=self.num_agents,
min_dist_between_agents_to_pickup=self.min_dist_between_agents_to_pickup,
object_points_set=object_points_set,
)
yield True
if self.save_talk_reply_probs_path is not None:
if torch.cuda.is_available():
save_talk_reply_data_frame(
agent,
self.save_talk_reply_probs_path,
None,
use_hash=True, # self._internal_episodes,
)
else:
save_talk_reply_data_frame(
agent, self.save_talk_reply_probs_path, self._internal_episodes
)
| cordial-sync-master | rl_multi_agent/furnlift_episode_samplers.py |
import random
from typing import Tuple, Dict, Union, Optional, List, Any, Sequence
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from rl_base import Episode
from rl_base.agent import EnvType, RLAgent, A3CAgent
from utils import net_util
from utils.misc_util import (
log_sum_exp,
outer_product,
joint_probability_tensor_from_mixture,
joint_log_probability_tensor_from_mixture,
outer_sum,
)
class MultiAgent(RLAgent):
def __init__(
self,
model: Optional[nn.Module] = None,
model_expert: Optional[nn.Module] = None,
episode: Optional[Episode[EnvType]] = None,
expert_class_available_actions: Optional[Sequence[str]] = None,
gpu_id: int = -1,
include_test_eval_results: bool = False,
record_all_in_test: bool = False,
include_depth_frame: bool = False,
huber_delta: Optional[float] = None,
discourage_failed_coordination: Union[bool, int, float] = False,
use_smooth_ce_loss_for_expert: bool = False,
resize_image_as: Optional[int] = None,
dagger_mode: Optional[bool] = None,
coordination_use_marginal_entropy: bool = False,
**kwargs,
) -> None:
super().__init__(model=model, episode=episode, gpu_id=gpu_id, **kwargs)
self.last_reward_per_agent: Optional[Tuple[float, ...]] = None
self.include_test_eval_results = include_test_eval_results or record_all_in_test
self.record_all_in_test = record_all_in_test
self.include_depth_frame = include_depth_frame
self.huber_delta = huber_delta
self.discourage_failed_coordination = discourage_failed_coordination
self.resize_image_as = resize_image_as
self.log_prob_of_actions: List[Tuple[torch.FloatTensor]] = []
self.entropy_per_agent: List[Optional[Tuple[torch.FloatTensor, ...]]] = []
self.rewards_per_agent: List[Tuple[float, ...]] = []
self.values_per_agent: List[Optional[Tuple[torch.FloatTensor, ...]]] = []
self.actions: List[int] = []
self.step_results: List[Dict[str, Any]] = []
self.eval_results: List[Dict[str, Any]] = []
self.log_prob_of_expert_action = []
self.log_prob_of_unsafe_action = []
self.expert_actions = []
self.took_expert_action = []
# For model based expert
self._model_expert: Optional[nn.Module] = None
if model_expert is not None:
self.model_expert = model_expert
self.hidden_expert: Optional[Any] = None
self.expert_class_available_actions = expert_class_available_actions
self.use_smooth_ce_loss_for_expert = use_smooth_ce_loss_for_expert
# For rule based expert:
assert not (dagger_mode and model_expert)
self.dagger_mode = dagger_mode
self.coordination_use_marginal_entropy = coordination_use_marginal_entropy
self.take_expert_action_prob = (
0
if "take_expert_action_prob" not in kwargs
else kwargs["take_expert_action_prob"]
)
self._unsafe_action_ind = kwargs.get("unsafe_action_ind")
self._safety_loss_lambda = (
1.0 if "safety_loss_lambda" not in kwargs else kwargs["safety_loss_lambda"]
)
self._use_a3c_loss = (
False
if "use_a3c_loss_when_not_expert_forcing" not in kwargs
else kwargs["use_a3c_loss_when_not_expert_forcing"]
)
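        # A3C loss hyperparameters (by their usual roles): discount factor, GAE tau,
        # and entropy-regularization weight.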
self.a3c_gamma = 0.99
self.a3c_tau = 1.0
self.a3c_beta = 1e-2
@property
def model_expert(self) -> nn.Module:
assert self._model_expert is not None
return self._model_expert
@model_expert.setter
def model_expert(self, model_expert_to_set: nn.Module):
if self.gpu_id >= 0:
with torch.cuda.device(self.gpu_id):
self._model_expert = model_expert_to_set.cuda()
else:
self._model_expert = model_expert_to_set
self._model_expert.eval()
def eval_at_state(
self,
state: Dict[str, Union[Tuple, torch.FloatTensor, torch.cuda.FloatTensor]],
hidden: Optional[Tuple[Tuple[torch.FloatTensor, ...]]],
**kwargs,
) -> Dict[str, Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]]:
assert type(state) == dict
extra = {}
if "object_ind" in state:
extra["object_ind"] = self.preprocess_action(state["object_ind"])
if "extra_embedding" in state:
extra["extra_embedding"] = state["extra_embedding"]
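        # Frames from all agents are stacked along the batch dimension so a single
        # forward pass evaluates every agent.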
return self.model(
inputs=torch.cat(tuple(s.unsqueeze(0) for s in state["frames"]), dim=0),
hidden=hidden,
agent_rotations=state["agent_rotations"],
**extra,
)
def eval_at_state_expert(
self,
state: Dict[str, Union[Tuple, torch.FloatTensor, torch.cuda.FloatTensor]],
hidden: Optional[Tuple[Tuple[torch.FloatTensor, ...]]],
**kwargs,
) -> Dict[str, Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]]:
assert type(state) == dict
extra = {}
if "object_ind" in state:
extra["object_ind"] = self.preprocess_action(state["object_ind"])
if "extra_embedding" in state:
extra["extra_embedding"] = state["extra_embedding"]
return net_util.recursively_detach(
self.model_expert(
inputs=torch.cat(tuple(s.unsqueeze(0) for s in state["frames"]), dim=0),
hidden=hidden,
agent_rotations=state["agent_rotations"],
**extra,
)
)
def _get_state(self, expert: bool):
if not expert:
states_for_agents = self.episode.states_for_agents()
else:
states_for_agents = self.episode.expert_states_for_agents()
if self.include_depth_frame:
assert (
self.resize_image_as is not None
and states_for_agents[0]["frame"].shape[-1] == 3
)
frames = tuple(
torch.cat(
(
self.preprocess_frame(s["frame"], self.resize_image_as),
self.preprocess_depth_frame(
s["depth_frame"], self.resize_image_as
),
),
dim=0,
)
for s in states_for_agents
)
elif self.resize_image_as is not None:
assert states_for_agents[0]["frame"].shape[-1] == 3
frames = tuple(
self.preprocess_frame(s["frame"], self.resize_image_as)
for s in states_for_agents
)
else:
assert self.resize_image_as is None
frames = tuple(
self.preprocess_long_tensor_frame(s["frame"]) for s in states_for_agents
)
extra = {}
if "object_ind" in states_for_agents[0]:
extra["object_ind"] = states_for_agents[0]["object_ind"]
state = {
"frames": frames,
"agent_rotations": [
90
* int(self.environment.get_agent_location(agent_id=i)["rotation"] / 90)
for i in range(self.environment.num_agents)
],
**extra,
}
if "would_coordinated_action_succeed" in states_for_agents[0]:
state["extra_embedding"] = self.gpuify(
torch.FloatTensor(
states_for_agents[0]["would_coordinated_action_succeed"]
)
.view(1, -1)
.repeat(self.environment.num_agents, 1)
)
return state
@property
def state(
self,
) -> Dict[str, Union[Tuple, torch.FloatTensor, torch.cuda.FloatTensor]]:
return self._get_state(expert=False)
@property
def state_expert(
self,
) -> Dict[str, Union[Tuple, torch.FloatTensor, torch.cuda.FloatTensor]]:
return self._get_state(expert=True)
def next_expert_agent_action(self) -> Dict[str, Tuple[Optional[int], ...]]:
# Currently not supporting model expert
assert hasattr(self.episode, "next_expert_action")
return {"expert_actions": self.episode.next_expert_action()}
def act(
self,
train: bool = True,
overriding_actions: Optional[Tuple[int, ...]] = None,
**kwargs,
) -> Dict:
if self.dagger_mode and overriding_actions:
            # Overriding actions are incompatible with dagger mode
            raise Exception(
                "Cannot use overriding_actions when dagger_mode is enabled."
)
if not self.model.training and train:
self.model.train()
if self.model.training and not train:
self.model.eval()
assert self.episode is not None
assert not self.episode.is_complete()
if not train and self.hidden is not None:
self.repackage_hidden()
eval_result = self.eval_at_current_state()
eval_result["coordination_type_tensor"] = self.gpuify(
torch.LongTensor(
np.array(self.episode.coordination_type_tensor(), dtype=int)
)
)
m = eval_result["coordination_type_tensor"].squeeze()
m_individual = (m == 0).float()
m_coord = (m > 0).float()
valid_action_tensor = m_individual + m_coord
num_agents = self.environment.num_agents
num_actions = len(self.episode.available_actions)
coordinated_actions = False
coordination_ind = None
if "logit_all" in eval_result:
# Either marginal or central agent
logit_per_agent = tuple(
eval_result["logit_all"][i].unsqueeze(0)
for i in range(len(eval_result["logit_all"]))
)
probs_per_agent = tuple(
F.softmax(logit, dim=1) for logit in logit_per_agent
)
log_probs_per_agent = tuple(
F.log_softmax(logit, dim=1) for logit in logit_per_agent
)
joint_logit_all = eval_result.get("joint_logit_all")
if joint_logit_all is not None and joint_logit_all:
# Central agent
assert len(probs_per_agent) == 1
joint_prob_tensor = probs_per_agent[0].view(
*(num_agents * [num_actions])
)
else:
joint_prob_tensor = outer_product(probs_per_agent)
eval_result["log_probs_per_agent"] = log_probs_per_agent
elif "coordinated_logits" in eval_result:
# Mixture
coordinated_actions = True
randomization_logits = eval_result["randomization_logits"]
coordinated_logits = eval_result["coordinated_logits"].view(
num_agents, -1, num_actions
)
coordinated_probs = F.softmax(coordinated_logits, dim=2)
randomization_probs = F.softmax(randomization_logits, dim=1)
coordinated_log_probs = F.log_softmax(coordinated_logits, dim=2)
randomization_log_probs = F.log_softmax(randomization_logits, dim=1)
eval_result["coordinated_log_probs"] = coordinated_log_probs
eval_result["randomization_log_probs"] = randomization_log_probs
coordination_ind = randomization_probs.multinomial(num_samples=1).item()
probs_per_agent = tuple(
coordinated_probs[agent_id, :, :].unsqueeze(0)
* randomization_probs.view(1, -1, 1)
for agent_id in range(num_agents)
)
log_probs_per_agent = tuple(
coordinated_log_probs[agent_id, :, :].unsqueeze(0)
+ randomization_log_probs.view(1, -1, 1)
for agent_id in range(num_agents)
)
joint_prob_tensor = joint_probability_tensor_from_mixture(
mixture_weights=randomization_probs.view(-1),
marginal_prob_matrices=coordinated_probs,
)
else:
raise NotImplementedError()
low_rank_approx = outer_product(
tuple(
joint_prob_tensor.sum(dim=[j for j in range(num_agents) if j != i])
for i in range(num_agents)
)
)
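        # Diagnostics sketch: low_rank_approx is the outer product of each agent's
        # marginal action distribution, so dist_to_low_rank (an L1 distance) measures
        # how far the joint policy is from being independent across agents, while
        # invalid_prob_mass is the probability assigned to joint actions that are
        # neither valid individual nor valid coordinated combinations.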
eval_result["additional_metrics"] = {}
eval_result["additional_metrics"]["dist_to_low_rank"] = torch.dist(
joint_prob_tensor, low_rank_approx, p=1
).item()
eval_result["additional_metrics"]["invalid_prob_mass"] = (
torch.mul(joint_prob_tensor, 1 - valid_action_tensor).sum().item()
)
self.hidden = eval_result.get("hidden_all")
joint_logit_all = eval_result.get("joint_logit_all")
# If expert action needs to be taken or not
if self.dagger_mode:
take_expert_action = (
train
and self.take_expert_action_prob != 0
and random.random() < self.take_expert_action_prob
)
else:
take_expert_action = False
if not take_expert_action:
# Operations when not acting as per expert actions
if self._use_a3c_loss:
if self.coordination_use_marginal_entropy and coordinated_actions:
assert len(log_probs_per_agent[0].shape) == 3
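                    # Marginal-entropy sketch: log_probs_per_agent[i] has shape
                    # (1, num_components, num_actions) and stores log(mixture weight) +
                    # log p(action | component); log_sum_exp over dim 1 yields the log of
                    # the marginal action distribution, which is paired with the marginal
                    # probabilities probs.sum(1) to form the entropy term -sum(p * log p).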
self.entropy_per_agent.append(
tuple(
-(log_sum_exp(log_probs, 1) * probs.sum(1))
.sum()
.unsqueeze(0)
for log_probs, probs in zip(
log_probs_per_agent, probs_per_agent
)
)
)
else:
self.entropy_per_agent.append(
tuple(
-(log_probs * probs).sum().unsqueeze(0)
for log_probs, probs in zip(
log_probs_per_agent, probs_per_agent
)
)
)
# Compute and update the expert actions
        # Ensure an expert action entry (possibly None) is recorded each step, so the list never exceeds the number of agent action-steps
expert_actions_dict = self.next_expert_agent_action()
self.expert_actions.append(expert_actions_dict["expert_actions"])
# deciding which action to take:
# The flow currently expects "actions":
# joint: is tuple of len 1 (multi index)
# mixture: is tuple of len nagents
# marginal: is tuple of len nagents
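        # Illustrative example (hypothetical sizes): with 2 agents and 13 actions, a
        # joint/central action is one flattened index into the 13 x 13 joint space,
        # e.g. np.ravel_multi_index((3, 7), (13, 13)) == 3 * 13 + 7 == 46, and
        # np.unravel_index(46, (13, 13)) == (3, 7) recovers the per-agent actions.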
if take_expert_action:
assert not overriding_actions
# take actions as per the expert.
if joint_logit_all:
# tuple of length 1
actions = tuple(
[
int(
np.ravel_multi_index(
self.expert_actions[-1], (num_actions,) * num_agents
)
)
]
)
else:
actions = tuple(self.expert_actions[-1])
elif overriding_actions:
# take actions as per the specified overriding actions.
actions = tuple(overriding_actions)
else:
# take actions as per the agent policy
if coordinated_actions:
actions = tuple(
(
probs[:, coordination_ind, :]
/ probs[:, coordination_ind, :].sum()
)
.multinomial(num_samples=1)
.item()
for probs in probs_per_agent
)
else:
actions = tuple(
probs.multinomial(num_samples=1).item() for probs in probs_per_agent
)
self.took_expert_action.append(take_expert_action)
if coordinated_actions:
log_prob_of_action_per_agent = tuple(
log_probs[:, coordination_ind, :].view(num_actions)[action]
for log_probs, action in zip(log_probs_per_agent, actions)
)
else:
log_prob_of_action_per_agent = tuple(
log_probs.view(-1)[action]
for log_probs, action in zip(log_probs_per_agent, actions)
)
log_prob_of_expert_action_per_agent = None
if self.expert_actions[-1] is not None and all(
x is not None for x in self.expert_actions[-1]
):
if self.use_smooth_ce_loss_for_expert:
raise NotImplementedError(
"Expert actions coming from an expert model, currently not supported"
)
elif coordinated_actions:
assert "permute_agent_probs" not in expert_actions_dict
log_probs_tensor = joint_log_probability_tensor_from_mixture(
log_mixture_weights=randomization_log_probs.view(-1),
marginal_log_prob_matrices=coordinated_log_probs,
)
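                # Sketch: index the joint log-probability tensor by each agent's expert
                # action in turn, leaving a scalar joint log-probability; splitting it
                # evenly across agents means that summing the per-agent terms in the
                # loss recovers the single joint cross-entropy term.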
ce = log_probs_tensor
for expert_action in self.expert_actions[-1]:
ce = ce[expert_action]
assert ce.dim() == 0
log_prob_of_expert_action_per_agent = (
ce / self.environment.num_agents,
) * self.environment.num_agents
else:
# Multiagent navigation task update
if joint_logit_all:
# Central
assert len(self.expert_actions[-1]) == num_agents
assert len(log_probs_per_agent) == 1
assert log_probs_per_agent[0].shape[1] == num_actions ** num_agents
log_prob_of_expert_action_per_agent = tuple(
[
log_probs_per_agent[0].view(num_actions ** num_agents)[
int(
np.ravel_multi_index(
self.expert_actions[-1],
(num_actions,) * num_agents,
)
)
]
]
)
else:
# Marginal
log_prob_of_expert_action_per_agent = tuple(
log_probs.view(num_actions)[action]
for log_probs, action in zip(
log_probs_per_agent, self.expert_actions[-1]
)
)
if train and (self._unsafe_action_ind is not None):
raise NotImplementedError()
before_locations = tuple(
self.environment.get_agent_location(agent_id=i)
for i in range(self.environment.num_agents)
)
# Making joint action single agent work with the current multi (marginal) agent setup
if joint_logit_all is not None and joint_logit_all:
assert len(probs_per_agent) == 1
assert probs_per_agent[0].shape[1] == num_actions ** num_agents
actions = tuple(
int(a)
for a in np.unravel_index(actions[0], (num_actions,) * num_agents)
)
step_result = self.episode.multi_step(actions)
after_locations = tuple(
self.environment.get_agent_location(agent_id=i)
for i in range(self.environment.num_agents)
)
rewards = tuple(s.get("reward") for s in step_result)
if any(r is None for r in rewards):
self.last_reward_per_agent = None
elif joint_logit_all is not None:
self.last_reward_per_agent = tuple(
sum(rewards) for _ in range(self.environment.num_agents)
)
else:
self.last_reward_per_agent = tuple(float(r) for r in rewards)
self.values_per_agent.append(eval_result.get("value_all"))
for i in range(len(step_result)):
step_result[i]["before_location"] = before_locations[i]
step_result[i]["after_location"] = after_locations[i]
eval_result["probs_per_agent"] = probs_per_agent
eval_result["log_probs_per_agent"] = log_probs_per_agent
eval_result["step_result"] = step_result
eval_result["actions"] = actions
eval_result["training"] = train
self.step_results.append(step_result)
self.actions.append(eval_result["actions"])
self.rewards_per_agent.append(self.last_reward_per_agent)
if train or self.include_test_eval_results or self.record_all_in_test:
self.eval_results.append(eval_result)
if train or self.record_all_in_test:
self.log_prob_of_actions.append(log_prob_of_action_per_agent)
self.log_prob_of_expert_action.append(log_prob_of_expert_action_per_agent)
return eval_result
def clear_history(self) -> None:
self.clear_graph_data()
self.last_reward_per_agent = None
self.rewards_per_agent = []
self.actions = []
self.expert_actions = []
self.step_results = []
self.took_expert_action = []
def clear_graph_data(self):
self.log_prob_of_actions = []
self.entropy_per_agent = []
self.values_per_agent = []
self.log_prob_of_expert_action = []
self.log_prob_of_unsafe_action = []
self.eval_results = []
def safety_loss(self, loss_dict):
assert self._unsafe_action_ind is not None
divisor = 0
numerator = 0
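        # Sketch of intent: average the log-probability the policy assigned to the
        # unsafe action over exactly those steps where the expert avoided it but the
        # agent took it, scaled by safety_loss_lambda; minimizing this term pushes
        # down the probability of such unsafe choices.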
for expert_actions, actions, log_probs in zip(
self.expert_actions, self.actions, self.log_prob_of_unsafe_action
):
unsafe_but_expert_safe = [
expert_action != self._unsafe_action_ind
and action == self._unsafe_action_ind
for expert_action, action in zip(expert_actions, actions)
]
numerator += sum(
log_prob
for include, log_prob in zip(unsafe_but_expert_safe, log_probs)
if include
)
divisor += sum(unsafe_but_expert_safe)
if divisor != 0:
loss_dict["safety_loss"] = self._safety_loss_lambda * (numerator / divisor)
def failed_coordination_loss(self, loss_dict):
loss = 0.0
nactions = len(self.episode.available_actions)
for er in self.eval_results:
m = er["coordination_type_tensor"].squeeze()
m_individual = (m == 0).float()
m_coord = (m > 0).float()
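            # m_individual marks joint-action cells whose combination is valid and of
            # INDIVIDUAL type, m_coord marks valid coordinated combinations, and cells
            # encoded as -1 (uncoordinated mixes) fall in neither mask; the term added
            # below rewards placing joint log-probability mass on the valid cells.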
if "coordinated_log_probs" in er:
joint_log_probs = joint_log_probability_tensor_from_mixture(
log_mixture_weights=er["randomization_log_probs"].view(-1),
marginal_log_prob_matrices=er["coordinated_log_probs"],
)
elif "joint_logit_all" in er and er["joint_logit_all"]:
assert len(er["logit_all"]) == 1
joint_log_probs = er["log_probs_per_agent"][0].view(
*[self.environment.num_agents * [nactions]]
)
else:
assert len(er["logit_all"]) > 1
joint_log_probs = outer_sum(
[log_probs.view(-1) for log_probs in er["log_probs_per_agent"]]
)
loss -= 0.5 * (
(joint_log_probs * m_individual).sum() / (m_individual.sum())
+ (joint_log_probs * m_coord).sum() / (m_coord.sum())
)
loss_dict["failed_coordination"] = loss / len(self.eval_results)
def loss(
self, train: bool = True, size_average: bool = True, **kwargs
) -> Union[Dict[str, torch.Tensor], None]:
loss_dict = {}
filtered_expert_log_prob_sums = [
sum(x) for x in self.log_prob_of_expert_action if x is not None
]
if len(filtered_expert_log_prob_sums) != 0:
loss_dict["xe_loss"] = -sum(filtered_expert_log_prob_sums) / len(
filtered_expert_log_prob_sums
)
if self._unsafe_action_ind is not None:
self.safety_loss(loss_dict)
if (
self._use_a3c_loss
and (not any(self.took_expert_action))
and all(r is not None for r in self.rewards_per_agent)
and all(e is not None for e in self.entropy_per_agent)
and all(v is not None for v in self.values_per_agent)
):
policy_loss = 0.0
value_loss = 0.0
entropy_loss = 0.0
eval = self.eval_at_current_state()
for agent_id in range(len(eval["value_all"])):
if self.episode.is_complete():
future_reward_est = 0.0
else:
future_reward_est = eval["value_all"][agent_id].item()
def extract(inputs: List[Tuple]):
return [input[agent_id] for input in inputs]
a3c_losses = A3CAgent.a3c_loss(
values=extract(self.values_per_agent),
rewards=extract(self.rewards_per_agent),
log_prob_of_actions=extract(self.log_prob_of_actions),
entropies=extract(self.entropy_per_agent),
future_reward_est=future_reward_est,
gamma=self.a3c_gamma,
tau=self.a3c_tau,
beta=self.a3c_beta,
gpu_id=self.gpu_id,
huber_delta=self.huber_delta,
)
policy_loss += a3c_losses["policy"]
value_loss += a3c_losses["value"]
entropy_loss += a3c_losses["entropy"]
policy_loss /= len(self.rewards_per_agent)
value_loss /= len(self.rewards_per_agent)
entropy_loss /= len(self.rewards_per_agent)
loss_dict["a3c_policy_loss"] = policy_loss
loss_dict["a3c_value_loss"] = value_loss
loss_dict["a3c_entropy_loss"] = entropy_loss
if self.discourage_failed_coordination:
if "a3c_entropy_loss" in loss_dict:
loss_dict["a3c_entropy_loss"] = net_util.recursively_detach(
loss_dict["a3c_entropy_loss"]
)
if type(self.discourage_failed_coordination) in [int, float]:
self.failed_coordination_loss(loss_dict)
alpha = max(
self.a3c_beta,
(self.discourage_failed_coordination - self.global_episode_count)
/ self.discourage_failed_coordination,
)
loss_dict["failed_coordination"] *= alpha
else:
assert type(self.discourage_failed_coordination) is bool
self.failed_coordination_loss(loss_dict)
return loss_dict
| cordial-sync-master | rl_multi_agent/agents.py |
from __future__ import division
import random
import sys
import time
import traceback
import warnings
from rl_multi_agent.furnmove_utils import save_agents_path_without_frame_png
from rl_multi_agent.multi_agent_utils import (
compute_losses_no_backprop,
EndProcessException,
)
warnings.simplefilter("always", UserWarning)
from setproctitle import setproctitle as ptitle
from typing import Optional
import networkx
import numpy as np
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import utils.debug_util as debug_util
import matplotlib as mpl
mpl.use("Agg")
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from utils.net_util import recursively_detach
np.set_printoptions(linewidth=1000)
def test(
args,
shared_model: nn.Module,
experiment: ExperimentConfig,
res_queue: mp.Queue,
end_flag: mp.Value,
worker_num: Optional[int] = None,
num_test_workers: Optional[int] = None,
) -> None:
if worker_num is not None:
ptitle("Test Agent {}".format(worker_num))
if args.test_gpu_ids is not None:
gpu_id = args.test_gpu_ids[worker_num % len(args.test_gpu_ids)]
else:
gpu_id = args.gpu_ids[worker_num % len(args.gpu_ids)]
else:
ptitle("Test Agent")
if args.test_gpu_ids is not None:
gpu_id = args.test_gpu_ids[-1]
else:
gpu_id = args.gpu_ids[-1]
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if gpu_id >= 0:
torch.cuda.manual_seed(args.seed)
if "set_worker_num" in dir(experiment.init_test_agent):
experiment.init_test_agent.set_worker_num(
worker_num=worker_num, total_workers=num_test_workers
)
model: nn.Module = experiment.create_model()
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
model.load_state_dict(shared_model.state_dict())
else:
model.load_state_dict(shared_model.state_dict())
agent: MultiAgent = experiment.create_agent(model=model, gpu_id=gpu_id)
res_sampler = debug_util.ReservoirSampler(3)
k = 0
while not end_flag.value:
try:
last_episode_data = None
agent.sync_with_shared(shared_model)
while True:
agent_iterator = experiment.init_test_agent(agent=agent)
try:
next(agent_iterator)
break
except StopIteration as _:
warnings.warn("Agent iterator was empty.")
while not agent.episode.is_complete():
action_result = agent.act(train=False)
if len(agent.eval_results) != 0:
agent.eval_results[-1] = recursively_detach(agent.eval_results[-1])
if end_flag.value:
raise EndProcessException()
last_episode_data = {
"iter": agent.episode.num_steps_taken_in_episode(),
"probs_per_agent": tuple(
probs.detach().cpu()[0, :].numpy()
for probs in action_result["probs_per_agent"]
),
"actions": action_result["actions"],
"action_success": [
action_result["step_result"][i]["action_success"]
for i in range(len(action_result["step_result"]))
],
"reward": [
action_result["step_result"][i]["reward"]
for i in range(len(action_result["step_result"]))
],
}
if not agent.episode.is_complete():
res_sampler.add(last_episode_data)
k += 1
if args.visualize_test_agent:
save_agents_path_without_frame_png(agent, k=k, args=args)
try:
next(agent_iterator)
raise Exception("Agent iterator should only yield once.")
except StopIteration as e:
pass
# Saving losses:
if agent.record_all_in_test:
last_losses = compute_losses_no_backprop(agent=agent)
else:
last_losses = None
info = agent.episode.info()
if last_losses is not None:
info = {**info, **last_losses}
if (
len(agent.eval_results) > 0
and "additional_metrics" in agent.eval_results[0]
):
for a_metric in agent.eval_results[0]["additional_metrics"]:
info[a_metric] = (
1.0
/ len(agent.eval_results)
* sum(
er["additional_metrics"][a_metric]
for er in agent.eval_results
)
)
res_queue.put({k: info[k] for k in info})
if args.verbose and last_episode_data is not None:
res_sample = res_sampler.get_sample()
print(
"\nRandom sample of testing episode in scene {}:".format(
agent.environment.scene_name
)
)
num_agents = agent.environment.num_agents
using_central_agent = num_agents > len(
last_episode_data["probs_per_agent"]
)
for i in range(num_agents if not using_central_agent else 1):
verbose_string = "\nAGENT {}".format(i)
for x in sorted(
res_sample + [last_episode_data], key=lambda y: y["iter"]
):
# TODO: Currently marginalizing to only show the first two agents.
# As things become quickly messy. Is there a nice way to visualize
# the probabilities for >2 agents?
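                        # Sketch: for a central agent, probs_per_agent[0] is a joint
                        # distribution over num_actions ** num_agents entries; the
                        # reshape below folds it to (num_actions, num_actions, -1) and
                        # sums the trailing axis, leaving the marginal over the first
                        # two agents' actions for display.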
probs_str = np.array2string(
x["probs_per_agent"][i]
if not using_central_agent
else np.reshape(
x["probs_per_agent"][i],
[
round(
len(x["probs_per_agent"][i]) ** (1 / num_agents)
)
]
* 2
+ [-1],
).sum(-1),
precision=2,
floatmode="fixed",
suppress_small=True,
)
if (
len(x["probs_per_agent"][i].squeeze().shape) == 1
and not using_central_agent
):
probs_split = probs_str.split(" ")
if len(probs_split) > 10:
probs_split = (
probs_split[0:5]
+ ["(..{}..)".format(len(probs_split) - 10)]
+ probs_split[-5:]
)
probs_str = " ".join(probs_split)
else:
probs_str = "\n" + probs_str + "\n"
verbose_string += "\n\tIter {0:>3}: probs {1}, action {2}, success: {3}, reward: {4}".format(
x["iter"],
probs_str,
x["actions"][i],
x["action_success"][i],
x["reward"][i],
)
print(verbose_string)
print(info)
res_sampler = debug_util.ReservoirSampler(3)
except EndProcessException as _:
print(
"End process signal received in test worker. Quitting...", flush=True,
)
sys.exit()
except (RuntimeError, ValueError, networkx.exception.NetworkXNoPath) as e:
print(
"RuntimeError, ValueError, or networkx exception when training, attempting to print traceback, print metadata, and reset."
)
print("-" * 60)
traceback.print_exc(file=sys.stdout)
print("-" * 60)
try:
print(agent.environment.last_event.metadata)
except NameError:
print("Unable to print environment metadata.")
print("-" * 60)
agent.clear_history()
agent.repackage_hidden()
time.sleep(args.val_timeout)
| cordial-sync-master | rl_multi_agent/test_process_runner.py |
import warnings
from typing import Dict
import torch
from torch import nn, optim as optim
from rl_multi_agent import MultiAgent
from utils.misc_util import ensure_shared_grads
def compute_losses_no_backprop(agent: MultiAgent):
full_loss = None
last_losses = {}
for k, loss in agent.loss().items():
loss = loss.squeeze()
last_losses["loss/" + k] = loss.item()
if full_loss is None:
full_loss = loss
elif (full_loss.is_cuda == loss.is_cuda) and (
not full_loss.is_cuda or full_loss.get_device() == loss.get_device()
):
full_loss += loss
else:
warnings.warn("Loss {} is on a different device!".format(k))
assert full_loss is not None
return last_losses
class TrainingCompleteException(Exception):
pass
class EndProcessException(Exception):
pass
def compute_losses_and_backprop(
agent: MultiAgent,
shared_model: nn.Module,
optimizer: optim.Optimizer,
update_lock,
gpu: bool,
retain_graph: bool = False,
skip_backprop: bool = False,
) -> Dict[str, float]:
agent.model.zero_grad()
full_loss = None
last_losses = {}
for k, loss in agent.loss().items():
loss = loss.squeeze()
last_losses["loss/" + k] = loss.item()
if full_loss is None:
full_loss = loss
elif (full_loss.is_cuda == loss.is_cuda) and (
not full_loss.is_cuda or full_loss.get_device() == loss.get_device()
):
full_loss += loss
else:
warnings.warn("Loss {} is on a different device!".format(k))
if full_loss is not None:
if not skip_backprop:
full_loss.backward(retain_graph=retain_graph)
torch.nn.utils.clip_grad_norm_(agent.model.parameters(), 3, "inf")
if update_lock is not None:
update_lock.acquire()
ensure_shared_grads(agent.model, shared_model, gpu=gpu)
optimizer.step()
if update_lock is not None:
update_lock.release()
else:
warnings.warn(
(
"No loss avaliable for agent.\n"
"Episode length: {}\n"
"Rewards: {}\n"
"Actions: {}\n"
"Expert actions: {}\n"
).format(
agent.episode.num_steps_taken_in_episode(),
agent.rewards_per_agent,
agent.actions,
agent.expert_actions,
)
)
return last_losses
| cordial-sync-master | rl_multi_agent/multi_agent_utils.py |
import itertools
import re
import warnings
from collections import defaultdict
from enum import Enum
from typing import Sequence, Tuple, Callable, Dict, Any, Optional, List, Union
import frozendict
import numpy as np
import scipy.spatial
import constants
from rl_ai2thor.ai2thor_environment import AI2ThorEnvironment
from rl_ai2thor.ai2thor_episodes import MultiAgentAI2ThorEpisode
from rl_ai2thor.ai2thor_gridworld_environment import AI2ThorLiftedObjectGridEnvironment
from utils.misc_util import all_equal
STEP_PENALTY = -0.01
FAILED_ACTION_PENALTY = -0.02
JOINT_MOVE_WITH_OBJECT_KEYWORD = "WithObject"
JOINT_MOVE_OBJECT_KEYWORD = "MoveLifted"
JOINT_ROTATE_KEYWORD = "RotateLifted"
EXPLORATION_BONUS = 0.5
JOINT_PASS_PENALTY = -0.1
CARD_DIR_STRS = ["North", "East", "South", "West"]
EGO_DIR_STRS = ["Ahead", "Right", "Back", "Left"]
ROTATE_OBJECT_ACTIONS = ["RotateLiftedObjectLeft", "RotateLiftedObjectRight"]
def allocentric_action_groups(include_move_obj_actions):
actions = (
"MoveNorth",
"MoveEast",
"MoveSouth",
"MoveWest",
"Pass",
"MoveAgentsNorthWithObject",
"MoveAgentsEastWithObject",
"MoveAgentsSouthWithObject",
"MoveAgentsWestWithObject",
"RotateLiftedObjectRight",
)
if include_move_obj_actions:
actions = actions + (
"MoveLiftedObjectNorth",
"MoveLiftedObjectEast",
"MoveLiftedObjectSouth",
"MoveLiftedObjectWest",
)
return (actions,)
def semiallocentric_action_groups(include_move_obj_actions):
actions = (
"MoveAhead",
"RotateLeft",
"RotateRight",
"Pass",
"MoveAgentsNorthWithObject",
"MoveAgentsEastWithObject",
"MoveAgentsSouthWithObject",
"MoveAgentsWestWithObject",
"RotateLiftedObjectRight",
)
if include_move_obj_actions:
actions = actions + (
"MoveLiftedObjectNorth",
"MoveLiftedObjectEast",
"MoveLiftedObjectSouth",
"MoveLiftedObjectWest",
)
return (actions,)
def egocentric_action_groups(include_move_obj_actions):
actions = (
"MoveAhead",
"RotateLeft",
"RotateRight",
"Pass",
"MoveAgentsAheadWithObject",
"MoveAgentsRightWithObject",
"MoveAgentsBackWithObject",
"MoveAgentsLeftWithObject",
"RotateLiftedObjectRight",
)
if include_move_obj_actions:
actions = actions + (
"MoveLiftedObjectAhead",
"MoveLiftedObjectRight",
"MoveLiftedObjectBack",
"MoveLiftedObjectLeft",
)
return (actions,)
def egocentric_no_rotate_action_groups(include_move_obj_actions):
actions = (
"MoveAhead",
"MoveLeft",
"MoveRight",
"MoveBack",
"Pass",
"MoveAgentsAheadWithObject",
"MoveAgentsRightWithObject",
"MoveAgentsBackWithObject",
"MoveAgentsLeftWithObject",
"RotateLiftedObjectRight",
)
if include_move_obj_actions:
actions = actions + (
"MoveLiftedObjectAhead",
"MoveLiftedObjectRight",
"MoveLiftedObjectBack",
"MoveLiftedObjectLeft",
)
return (actions,)
class CoordType(Enum):
# DO NOT CHANGE THIS WITHOUT CHANGING coordination_type_tensor to match
# INDIVIDUAL should be the smallest value and equal 0
INDIVIDUAL = 0
ROTATE_LIFTED = 1
MOVE_LIFTED_CARD = 2
MOVE_LIFTED_EGO = 3
MOVE_WITH_LIFTED_CARD = 4
MOVE_WITH_LIFTED_EGO = 5
PICKUP = 6
ACTION_TO_COORD_TYPE = frozendict.frozendict(
{
"Pass": CoordType.INDIVIDUAL,
#
**{
"Move{}".format(dir): CoordType.INDIVIDUAL
for dir in CARD_DIR_STRS + EGO_DIR_STRS
},
**{"Rotate{}".format(dir): CoordType.INDIVIDUAL for dir in ["Left", "Right"]},
#
**{
"RotateLiftedObject{}".format(dir): CoordType.ROTATE_LIFTED
for dir in ["Left", "Right"]
},
#
**{
"MoveLiftedObject{}".format(dir): CoordType.MOVE_LIFTED_CARD
for dir in CARD_DIR_STRS
},
#
**{
"MoveAgents{}WithObject".format(dir): CoordType.MOVE_WITH_LIFTED_CARD
for dir in CARD_DIR_STRS
},
#
**{
"MoveLiftedObject{}".format(dir): CoordType.MOVE_LIFTED_EGO
for dir in EGO_DIR_STRS
},
#
**{
"MoveAgents{}WithObject".format(dir): CoordType.MOVE_WITH_LIFTED_EGO
for dir in EGO_DIR_STRS
},
"Pickup": CoordType.PICKUP,
}
)
def rotate_clockwise(l, n):
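    """Rotate the list `l` to the right by `n`, e.g. rotate_clockwise([0, 1, 2, 3], 1) == [3, 0, 1, 2]."""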
return l[-n:] + l[:-n]
def are_actions_coordinated(env: AI2ThorEnvironment, action_strs: Sequence[str]):
action_types = [ACTION_TO_COORD_TYPE[a] for a in action_strs]
if not all_equal(action_types):
return False
action_type = action_types[0]
if action_type == CoordType.INDIVIDUAL:
return True
if action_type in [
CoordType.ROTATE_LIFTED,
CoordType.MOVE_LIFTED_CARD,
CoordType.MOVE_WITH_LIFTED_CARD,
CoordType.PICKUP,
]:
return all_equal(action_strs)
elif action_type in [CoordType.MOVE_LIFTED_EGO, CoordType.MOVE_WITH_LIFTED_EGO]:
action_relative_ind = [None] * env.num_agents
for i, action in enumerate(action_strs):
for j, dir in enumerate(EGO_DIR_STRS):
if dir in action:
action_relative_ind[i] = j
break
if action_relative_ind[i] is None:
raise RuntimeError("Ego action but no ego dir in action name?")
agent_rot_inds = [
round(env.get_agent_location(agent_id)["rotation"] / 90)
for agent_id in range(env.num_agents)
]
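        # Worked example: an "Ahead" ego action has relative index 0; taken by an agent
        # rotated 90 degrees (facing East), it maps to absolute direction
        # (0 + 1) % 4 == 1, i.e. East in CARD_DIR_STRS order. The actions count as
        # coordinated iff all agents map to the same absolute direction index.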
return all_equal(
[
int(dir_rel_ind + agent_rot_ind) % 4
for dir_rel_ind, agent_rot_ind in zip(
action_relative_ind, agent_rot_inds
)
]
)
else:
raise NotImplementedError(
"Cannot determine if {} actions are coordinated.".format(action_strs)
)
def are_actions_coordinated_with_pass_conditioning(
env: AI2ThorEnvironment, action_strs: Sequence[str]
):
action_types = [ACTION_TO_COORD_TYPE[a] for a in action_strs]
if not all_equal(action_types):
return False
action_type = action_types[0]
if action_type == CoordType.INDIVIDUAL:
if "Pass" in action_strs:
return True
else:
return False
if action_type in [
CoordType.ROTATE_LIFTED,
CoordType.MOVE_LIFTED_CARD,
CoordType.MOVE_WITH_LIFTED_CARD,
CoordType.PICKUP,
]:
return all_equal(action_strs)
elif action_type in [CoordType.MOVE_LIFTED_EGO, CoordType.MOVE_WITH_LIFTED_EGO]:
action_relative_ind = [None] * env.num_agents
for i, action in enumerate(action_strs):
for j, dir in enumerate(EGO_DIR_STRS):
if dir in action:
action_relative_ind[i] = j
break
if action_relative_ind[i] is None:
raise RuntimeError("Ego action but no ego dir in action name?")
agent_rot_inds = [
round(env.get_agent_location(agent_id)["rotation"] / 90)
for agent_id in range(env.num_agents)
]
return all_equal(
[
int(dir_rel_ind + agent_rot_ind) % 4
for dir_rel_ind, agent_rot_ind in zip(
action_relative_ind, agent_rot_inds
)
]
)
else:
raise NotImplementedError(
"Cannot determine if {} actions are coordinated.".format(action_strs)
)
COORDINATION_TYPE_TENSOR_CACHE = {}
def coordination_type_tensor(
env,
action_strings: Tuple[str],
action_coordination_checker: Callable[[AI2ThorEnvironment, Sequence[str]], bool],
):
agent_rot_inds = tuple(
round(env.get_agent_location(i)["rotation"] / 90) % 4
for i in range(env.num_agents)
)
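    # Caching note: the coordination-type tensor depends only on the agents' rotations,
    # the available action strings, and the checker function (not on agent positions),
    # so it can safely be memoized on this key and reused across steps.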
key = (agent_rot_inds, action_strings, action_coordination_checker)
if key in COORDINATION_TYPE_TENSOR_CACHE:
return COORDINATION_TYPE_TENSOR_CACHE[key]
coord_tensor = np.full(
(len(action_strings),) * env.num_agents, fill_value=-1, dtype=int
)
    for ind in range(np.prod(coord_tensor.shape)):
multi_ind = np.unravel_index(ind, coord_tensor.shape)
multi_action = tuple(action_strings[i] for i in multi_ind)
if action_coordination_checker(env, multi_action):
coord_tensor[multi_ind] = int(ACTION_TO_COORD_TYPE[multi_action[0]].value)
COORDINATION_TYPE_TENSOR_CACHE[key] = coord_tensor
return coord_tensor
def lifted_furniture_step(
episode, action: str, action_as_int: int, agent_id: int = None, **kwargs
) -> Dict[str, Any]:
if any(dir in action for dir in CARD_DIR_STRS):
action_dir = None
for dir in CARD_DIR_STRS:
if dir in action:
action_dir = dir
break
joint_actions_nesw_to_local_index = dict(North=0, East=1, South=2, West=3)
joint_actions_aligned_to_agent = ["Ahead", "Right", "Back", "Left"]
rotation = episode.environment.get_agent_location(agent_id)["rotation"]
clock_90 = round(rotation / 90.0)
joint_actions_aligned_to_agent = rotate_clockwise(
joint_actions_aligned_to_agent, clock_90
)
# joint_actions_aligned_to_agent is Ahead, Right, Back and Left aligned as per
# agent rotation. Eg. for a 90 degree agent the (relative) joint actions would be:
# [
# "MoveAgentsLeftWithObject",
# "MoveAgentsAheadWithObject",
# "MoveAgentsRightWithObject",
# "MoveAgentsBackWithObject",
# ]
# reference:
# L
# |
# B ---> A
# |
# R
#
        # This converts N,E,S,W (nesw) to the local index 0,1,2,3 and then to the
        # agent-relative direction (for the 90 degree example above: L,A,R,B).
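        # e.g. "MoveAgentsNorthWithObject" issued for an agent rotated 90 degrees
        # (facing East) is translated to "MoveAgentsLeftWithObject" before being
        # dispatched to the controller.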
joint_action_nesw_index = joint_actions_nesw_to_local_index[action_dir]
assert joint_action_nesw_index >= 0
joint_action_arbl = joint_actions_aligned_to_agent[joint_action_nesw_index]
action_dict = {
"action": action.replace(action_dir, joint_action_arbl),
"agentId": agent_id,
**kwargs,
}
else:
action_dict = {"action": action, "agentId": agent_id, **kwargs}
action_dict["maxAgentsDistance"] = episode.max_distance_from_object
episode.environment.step(action_dict)
action_success = episode.environment.last_event.events[agent_id].metadata[
"lastActionSuccess"
]
episode.environment.last_event.metadata["lastAction"] = action
if (
"actionReturn"
in episode.environment.last_event.events[agent_id].metadata.keys()
):
action_return = episode.environment.last_event.events[agent_id].metadata[
"actionReturn"
]
else:
action_return = None
return {
"action": action_as_int,
"action_success": action_success,
"action_return": action_return,
}
class MultiAgentMovingWithFurnitureBaseEpisode(MultiAgentAI2ThorEpisode):
def __init__(
self,
env: AI2ThorEnvironment,
task_data: Dict[str, Any],
max_steps: int,
max_distance_from_object: float,
frame_type: str = "image",
grid_output_shape: Optional[Tuple[int, int]] = None,
first_correct_coord_reward: Optional[float] = None,
increasing_rotate_penalty: bool = False,
step_penalty=STEP_PENALTY,
failed_action_penalty=FAILED_ACTION_PENALTY,
exploration_bonus=EXPLORATION_BONUS,
expert_frame_type: Optional[str] = None,
return_likely_successfuly_move_actions: bool = False,
return_likely_successfuly_move_actions_for_expert: bool = False,
pass_conditioned_coordination: bool = False,
**kwargs,
):
super(MultiAgentMovingWithFurnitureBaseEpisode, self).__init__(
env=env, task_data=task_data, max_steps=max_steps, **kwargs
)
self.frame_type = frame_type
self.first_correct_coord_reward = first_correct_coord_reward
self.increasing_rotate_penalty = increasing_rotate_penalty
self.initial_agent_metadata = env.get_all_agent_metadata()
self.object_id = task_data["move_obj_id"]
self.to_object_id = task_data.get("move_to_obj_id")
self.grid_output_shape = grid_output_shape
self.visited_xz = set()
self.visited_xzr = set()
self._max_distance_from_object = max_distance_from_object
self.total_reward = 0.0
self.action_counts = defaultdict(int)
self.coordinated_actions_taken = set()
self.agent_num_sequential_rotates = {}
# For now we assume we're moving a Television
assert "Television" in self.object_id
assert self.to_object_id is None or "Dresser" in self.to_object_id
self.step_penalty = step_penalty
self.failed_action_penalty = failed_action_penalty
self.exploration_bonus = exploration_bonus
self.joint_pass_penalty = JOINT_PASS_PENALTY
self.expert_frame_type = expert_frame_type
self.return_likely_successfuly_move_actions = (
return_likely_successfuly_move_actions
)
self.return_likely_successfuly_move_actions_for_expert = (
return_likely_successfuly_move_actions_for_expert
)
self.tv_reachable_positions_tensor = None
self._tv_reachable_positions_set = None
self.pass_conditioned_coordination = pass_conditioned_coordination
self.coordinated_action_checker: Callable[
[AI2ThorEnvironment, Sequence[str]], bool
] = None
if self.pass_conditioned_coordination:
self.coordinated_action_checker = (
are_actions_coordinated_with_pass_conditioning
)
else:
self.coordinated_action_checker = are_actions_coordinated
@property
def max_distance_from_object(self):
return self._max_distance_from_object
@property
def tv_reachable_positions_set(self):
if self._tv_reachable_positions_set is not None:
return self._tv_reachable_positions_set
self.environment.step(
{
"action": "GetReachablePositionsForObject",
"objectId": self.object_id,
"agentId": 0,
}
)
self._tv_reachable_positions_set = set(
(round(pos["x"], 2), round(pos["z"], 2))
for pos in self.environment.last_event.metadata["actionReturn"]
)
return self._tv_reachable_positions_set
def _points_set_for_rotation(self, obj_id, obj_points_dict):
obj = self.environment.get_object_by_id(obj_id, agent_id=0)
obj_rot = 90 * int(obj["rotation"]["y"] / 90)
return set(
(
round(obj["position"]["x"] + t[0], 2),
round(obj["position"]["z"] + t[1], 2),
)
for t in obj_points_dict[obj_rot]
)
def current_object_points_set(self):
return self._points_set_for_rotation(
self.object_id, constants.TELEVISION_ROTATION_TO_OCCUPATIONS
)
def current_to_object_points_set(self):
return self._points_set_for_rotation(
self.to_object_id, constants.TV_STAND_ROTATION_TO_OCCUPATIONS
)
def current_distance_between_lifted_and_goal_objects(self):
move_obj = self.environment.get_object_by_id(self.object_id, agent_id=0)
move_to_obj = self.environment.get_object_by_id(self.to_object_id, agent_id=0)
return round(
abs(move_obj["position"]["x"] - move_to_obj["position"]["x"])
+ abs(move_obj["position"]["z"] - move_to_obj["position"]["z"]),
2,
)
def coordination_type_tensor(self):
return coordination_type_tensor(
self.environment, self.available_actions, self.coordinated_action_checker
)
def _increment_num_steps_taken_in_episode_by_n(self, n) -> None:
self._num_steps_taken_in_episode += n
def info(self):
        def camel_to_snake(name):
# See https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
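            # e.g. camel_to_snake("MoveWithObject") == "move_with_object"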
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
info = {
**super(MultiAgentMovingWithFurnitureBaseEpisode, self).info(),
"num_pass": self.action_counts["Pass"],
"num_rotation": self.action_counts["Rotate"],
"reward": self.total_reward,
"percent_points_visited": len(self.visited_xz)
/ len(self.environment.initially_reachable_points)
* 100.0,
}
for k_attempt in list(self.action_counts.keys()):
if "Attempted" in k_attempt:
k_base = k_attempt.replace("Attempted", "")
k_success = k_attempt.replace("Attempted", "Successful")
n_attempt = self.action_counts[k_attempt]
n_success = self.action_counts[k_success]
                info[camel_to_snake(k_base) + "/attempted/count"] = n_attempt
if n_attempt > 0:
                    info[camel_to_snake(k_base) + "/successful/percent"] = 100 * (
n_success / n_attempt
)
return info
def add_likely_successful_move_actions(self, states, frame_type):
assert frame_type == "allocentric-tensor-centered-at-tv"
tensor = states[0]["frame"]
agent_reachable = tensor[0]
tv_reachable = tensor[-2]
a0_pos_mat = tensor[1:5].sum(0) != 0
a1_pos_mat = tensor[5:9].sum(0) != 0
tv_pos_mat = tensor[-3] != 0
try:
a0row, a0col = tuple(zip(*np.where(a0_pos_mat)))[0]
a1row, a1col = tuple(zip(*np.where(a1_pos_mat)))[0]
all_tv_pos = np.stack(np.where(tv_pos_mat), axis=1)
tv_row, tv_col = np.array(np.median(all_tv_pos, axis=0), dtype=int)
would_succeed = []
moves = [(-1, 0), (0, 1), (1, 0), (0, -1)]
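            # Each move is a (row, col) offset in the occupancy grid; presumably these
            # follow CARD_DIR_STRS order (North, East, South, West), so the resulting
            # would_succeed flags line up with "would_coordinated_action_succeed".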
# Move with tv
for m in moves:
r_off, c_off = m
try:
would_succeed.append(
agent_reachable[a0row + r_off, a0col + c_off]
and agent_reachable[a1row + r_off, a1col + c_off]
and tv_reachable[tv_row + r_off, tv_col + c_off]
)
except IndexError as _:
would_succeed.append(False)
# Move tv alone
for m in moves:
r_off, c_off = m
shifted_tv_pos = all_tv_pos + np.array((m,))
would_succeed.append(
tv_reachable[a0row + r_off, a0col + c_off]
and (
not np.any(
np.abs(shifted_tv_pos - np.array(((a0row, a0col),))).sum(1)
== 0
)
)
and (
not np.any(
np.abs(shifted_tv_pos - np.array(((a1row, a1col),))).sum(1)
== 0
)
)
)
would_succeed = [bool(w) for w in would_succeed]
for s in states:
s["would_coordinated_action_succeed"] = [bool(w) for w in would_succeed]
except IndexError as _:
for s in states:
s["would_coordinated_action_succeed"] = [False] * 4
warnings.warn(
"\nCould not compute whether coordinated actions would succeed.\nIn scene {}, agent positions {} and {}. Current tensor: {}".format(
self.environment.scene_name,
self.environment.get_agent_location(0),
self.environment.get_agent_location(1),
tensor,
)
)
def states_for_expert_agents(self):
states = self.states_for_agents(self.expert_frame_type)
if self.return_likely_successfuly_move_actions_for_expert:
if "would_coordinated_action_succeed" not in states[0]:
self.add_likely_successful_move_actions(
states=states, frame_type=self.expert_frame_type
)
return states
def _step(
self, action_as_int: int, agent_id: int = None, **kwargs
) -> Dict[str, Any]:
return lifted_furniture_step(
episode=self,
action=self.available_actions[action_as_int],
action_as_int=action_as_int,
agent_id=agent_id,
**kwargs,
)
def multi_step(self, actions_as_ints: Tuple[int, ...]) -> List[Dict[str, Any]]:
assert not self.is_paused() and not self.is_complete()
self._increment_num_steps_taken_in_episode_by_n(self.environment.num_agents)
available_actions = self.available_actions
        # Check that every provided action index is valid for this episode's available_actions
assert all(
[action_id < len(available_actions) for action_id in actions_as_ints]
)
actions_as_strings = tuple(
[available_actions[action_id] for action_id in actions_as_ints]
)
for agent_id, action in enumerate(actions_as_strings):
if agent_id not in self.agent_num_sequential_rotates:
self.agent_num_sequential_rotates[agent_id] = 0
            if any(action == ra for ra in ["RotateRight", "RotateLeft"]):
self.agent_num_sequential_rotates[agent_id] += 1
elif action.lower() != "pass":
self.agent_num_sequential_rotates[agent_id] = 0
before_object_metadata = self.environment.get_object_by_id(
object_id=self.object_id, agent_id=0
)
before_object_location = {
"x": before_object_metadata["position"]["x"],
"y": before_object_metadata["position"]["y"],
"z": before_object_metadata["position"]["z"],
"rotation": before_object_metadata["rotation"]["y"],
}
step_results = []
before_info = (
None
if self.before_step_function is None
else self.before_step_function(episode=self)
)
for i, action in enumerate(actions_as_strings):
step_results.append(
{
"action": actions_as_ints[i],
"action_as_string": action,
"reward": self.step_penalty,
}
)
# If an action is for movement with object
is_joint_move_with = tuple(
[
JOINT_MOVE_WITH_OBJECT_KEYWORD.lower() in action.lower()
for action in actions_as_strings
]
)
# If an action is for moving the object
is_joint_move_object = tuple(
[
JOINT_MOVE_OBJECT_KEYWORD.lower() in action.lower()
for action in actions_as_strings
]
)
# If joint rotate action
is_joint_rotate = tuple(
[
JOINT_ROTATE_KEYWORD.lower() in action.lower()
for action in actions_as_strings
]
)
# Correct coordination?
correct_coordination = self.coordinated_action_checker(
self.environment, actions_as_strings
)
if correct_coordination and (
all(is_joint_rotate) or all(is_joint_move_object) or all(is_joint_move_with)
):
self.coordinated_multi_step(
actions_as_ints=actions_as_ints,
actions_as_strings=actions_as_strings,
available_actions=available_actions,
step_results=step_results,
)
else:
if not self.pass_conditioned_coordination:
# Handles the old coordination setting
self.uncoordinated_multi_step(
actions_as_strings=actions_as_strings,
available_actions=available_actions,
is_joint_action=list(
any(z)
for z in zip(
is_joint_move_with, is_joint_move_object, is_joint_rotate
)
),
step_results=step_results,
)
elif "Pass" in actions_as_strings:
# (Pass, X) can always go through the uncoordinated_multi_step
                # If X is a joint action it will simply incur the failed-action penalty
self.uncoordinated_multi_step(
actions_as_strings=actions_as_strings,
available_actions=available_actions,
is_joint_action=list(
any(z)
for z in zip(
is_joint_move_with, is_joint_move_object, is_joint_rotate
)
),
step_results=step_results,
)
else:
                # (Non pass, Non pass) actions which aren't coordinated (joint, joint)
for agent_id, action in enumerate(actions_as_strings):
step_results[agent_id]["reward"] += self.failed_action_penalty
step_results[agent_id]["action_success"] = False
self.total_reward += sum(sr["reward"] for sr in step_results)
after_object_metadata = self.environment.get_object_by_id(
object_id=self.object_id, agent_id=0
)
after_object_location = {
"x": after_object_metadata["position"]["x"],
"y": after_object_metadata["position"]["y"],
"z": after_object_metadata["position"]["z"],
"rotation": after_object_metadata["rotation"]["y"],
}
object_location = {
self.object_id: {
"before_location": before_object_location,
"after_location": after_object_location,
}
}
for sr in step_results:
sr["object_location"] = object_location
if self.after_step_function is not None:
self.after_step_function(
step_results=step_results, before_info=before_info, episode=self
)
return step_results
# @profile
def coordinated_multi_step(
self, actions_as_ints, actions_as_strings, available_actions, step_results
):
if self.first_correct_coord_reward is not None:
assert type(actions_as_ints) == tuple
if actions_as_ints not in self.coordinated_actions_taken:
self.coordinated_actions_taken.add(actions_as_ints)
for sr in step_results:
sr["reward"] += self.first_correct_coord_reward
found_substring = False
action_count_string = None
for substr in CARD_DIR_STRS + EGO_DIR_STRS:
if substr in actions_as_strings[0]:
found_substring = True
action_count_string = (
actions_as_strings[0].replace(substr, "") + "Attempted"
)
break
if not found_substring:
raise Exception("Could not construct action_count_string")
self.action_counts[action_count_string] += self.environment.num_agents
step_result = self._step(
action_as_int=available_actions.index(actions_as_strings[0]),
agent_id=0,
objectId=self.object_id,
maxAgentsDistance=self._max_distance_from_object,
)
action_success = step_result["action_success"]
self.action_counts[action_count_string.replace("Attempted", "Successful")] += (
action_success * self.environment.num_agents
)
object = self.environment.get_object_by_id(self.object_id, agent_id=0)
object_position = object["position"]
object_pos_rot_tuple = (
round(object_position["x"], 2),
round(object_position["z"], 2),
round(object["rotation"]["y"] / 90) % 2,
)
object_pos_tuple = object_pos_rot_tuple[:2]
additional_reward = self.failed_action_penalty * (1 - action_success)
is_new_object_rotation = False
if (
object_pos_tuple in self.visited_xz
and object_pos_rot_tuple not in self.visited_xzr
):
is_new_object_rotation = True
additional_reward += 0.5 * self.exploration_bonus
is_new_object_position = False
if object_pos_tuple not in self.visited_xz:
is_new_object_position = True
additional_reward += self.exploration_bonus
self.visited_xz.add(object_pos_tuple)
self.visited_xzr.add(object_pos_rot_tuple)
for srs in step_results:
srs["successfully_coordinated"] = True
srs["object_pos_rot_tuple"] = object_pos_rot_tuple
srs["is_new_object_position"] = is_new_object_position
srs["is_new_object_rotation"] = is_new_object_rotation
srs["action_success"] = action_success
srs["reward"] += additional_reward
return step_results
# @profile
def uncoordinated_multi_step(
self, actions_as_strings, available_actions, is_joint_action, step_results
):
for agent_id, action in enumerate(actions_as_strings):
additional_reward = 0.0
action_success = False
if is_joint_action[agent_id]:
additional_reward += self.failed_action_penalty
elif action == "Pass":
additional_reward += 0.0
action_success = True
self.action_counts["Pass"] += 1
self._step(
action_as_int=available_actions.index(action), agent_id=agent_id
)
elif action == "RotateLeft" or action == "RotateRight":
if self.increasing_rotate_penalty:
additional_reward += max(
-1,
self.step_penalty
* (
2 ** ((self.agent_num_sequential_rotates[agent_id] // 4))
- 1
),
)
action_success = True
self.action_counts["Rotate"] += 1
self._step(
action_as_int=available_actions.index(action), agent_id=agent_id
)
elif "Move" in action:
# Based on the order of conditions, this will be executed for single
# agent move actions, bucketed under MoveAheadAttempted/Successful
# Could be:
# MoveAhead, MoveRight, MoveBack, MoveLeft or
# MoveNorth, MoveEast, MoveSouth, MoveWest
self.action_counts["MoveAheadAttempted"] += 1
sr = self._step(
action_as_int=available_actions.index(action),
agent_id=agent_id,
maxAgentsDistance=self._max_distance_from_object,
objectId=self.object_id,
)
action_success = sr["action_success"]
self.action_counts["MoveAheadSuccessful"] += 1 * action_success
additional_reward += self.failed_action_penalty * (1 - action_success)
else:
raise Exception(
"Something wrong with conditions for action {}".format(action)
)
step_results[agent_id]["reward"] += additional_reward
step_results[agent_id]["action_success"] = action_success
return step_results
def expert_states_for_agents(self):
return self.states_for_agents(frame_type=self.expert_frame_type)
def states_for_agents(self, frame_type: Optional[str] = None):
frames = []
if frame_type is None:
frame_type = self.frame_type
if frame_type == "image":
return super(
MultiAgentMovingWithFurnitureBaseEpisode, self
).states_for_agents()
elif frame_type == "grid-matrix":
# Reachable, unreachable and agent locations are marked
(
matrix_all_agents,
point_to_element_map,
) = self.environment.get_current_occupancy_matrices_two_agents(
padding=0.5, use_initially_reachable_points_matrix=True
)
# TODO: Edit to have generic n-agent version of this
# Configured for two agents only
assert self.environment.num_agents == 2
# Mark visited locations
nskipped = 0
for point_tuple in self.visited_xz:
if point_tuple not in point_to_element_map:
nskipped += 1
continue
row, col = point_to_element_map[point_tuple]
# Shouldn't overwrite agent embedding information or goal object information
if not (
matrix_all_agents[0][row, col]
in [
constants.REACHABLE_SYM,
constants.UNREACHABLE_SYM,
constants.NO_INFO_SYM,
]
and matrix_all_agents[1][row, col]
in [
constants.REACHABLE_SYM,
constants.UNREACHABLE_SYM,
constants.NO_INFO_SYM,
]
):
continue
matrix_all_agents[0][row, col] = constants.VISITED_SYM
matrix_all_agents[1][row, col] = constants.VISITED_SYM
if nskipped > 10:
warnings.warn(
"Skipping many object's visited points ({}) in scene {}.".format(
nskipped, self.environment.scene_name
)
)
# Mark goal object locations
nskipped = 0
object_points_set = self.current_object_points_set()
agent_position_consts = {
constants.AGENT_OTHER_0,
constants.AGENT_OTHER_90,
constants.AGENT_OTHER_180,
constants.AGENT_OTHER_270,
constants.AGENT_SELF_0,
constants.AGENT_SELF_90,
constants.AGENT_SELF_180,
constants.AGENT_SELF_270,
}
for point_tuple in object_points_set:
if point_tuple not in point_to_element_map:
nskipped += 1
continue
row, col = point_to_element_map[point_tuple]
if matrix_all_agents[0][row, col] not in agent_position_consts:
matrix_all_agents[0][row, col] = constants.GOAL_OBJ_SYM
if matrix_all_agents[1][row, col] not in agent_position_consts:
matrix_all_agents[1][row, col] = constants.GOAL_OBJ_SYM
if nskipped == len(object_points_set):
raise RuntimeError(
"Skipped all object points in scene {}.".format(
self.environment.scene_name
)
)
elif nskipped > 10:
warnings.warn(
"Skipping many object points ({}) in scene {}.".format(
nskipped, self.environment.scene_name
)
)
if self.to_object_id is not None:
raise NotImplementedError(
"to_object_id must be None when using frame_tupe=='grid-matrix'"
)
for agent_id in range(self.environment.num_agents):
matrix_allo_per_agent = self.environment.current_allocentric_matrix_frame(
agent_id, matrix_all_agents[agent_id], point_to_element_map, 10
)
frames.append(matrix_allo_per_agent)
elif frame_type in [
"allocentric-tensor",
"allocentric-tensor-centered-at-tv",
"egocentric-tensor",
"allocentric-tensor-no-rotations",
]:
if frame_type in [
"allocentric-tensor",
"allocentric-tensor-centered-at-tv",
"egocentric-tensor",
]:
# Reachable, unreachable and agent locations are marked
(
state_tensor_per_agent,
point_to_element_map,
) = self.environment.get_current_multi_agent_occupancy_tensors(
padding=1.0, use_initially_reachable_points_matrix=True
)
elif frame_type in ["allocentric-tensor-no-rotations"]:
(
state_tensor_per_agent,
point_to_element_map,
) = self.environment.get_current_multi_agent_occupancy_tensors_no_rot(
padding=1.0, use_initially_reachable_points_matrix=True
)
else:
raise Exception("Check conditions!")
nrow_ncol = state_tensor_per_agent[0].shape[-2:]
# TODO: Edit to have generic n-agent version of this
# Configured for two agents only
assert self.environment.num_agents == 2
# Mark visited locations
nskipped = 0
visited_tensor = np.zeros((1, *nrow_ncol), dtype=bool)
for point_tuple in self.visited_xz:
if point_tuple not in point_to_element_map:
nskipped += 1
continue
row, col = point_to_element_map[point_tuple]
visited_tensor[0, row, col] = True
if nskipped > 10:
warnings.warn(
"Skipping many object's visited points ({}) in scene {}.".format(
nskipped, self.environment.scene_name
)
)
# Mark goal object locations
nskipped = 0
object_points_set = self.current_object_points_set()
object_tensor = np.zeros((1, *nrow_ncol), dtype=bool)
for point_tuple in object_points_set:
if point_tuple not in point_to_element_map:
nskipped += 1
continue
row, col = point_to_element_map[point_tuple]
object_tensor[0, row, col] = True
if nskipped == len(object_points_set):
raise RuntimeError(
"Skipped all object points in scene {}.".format(
self.environment.scene_name
)
)
elif nskipped > 10:
warnings.warn(
"Skipping many object points ({}) in scene {}.".format(
nskipped, self.environment.scene_name
)
)
if self.tv_reachable_positions_tensor is None:
self.tv_reachable_positions_tensor = np.zeros(
(1, *nrow_ncol), dtype=bool
)
nskipped = 0
for point_tuple in self.tv_reachable_positions_set:
if point_tuple not in point_to_element_map:
nskipped += 1
continue
row, col = point_to_element_map[point_tuple]
self.tv_reachable_positions_tensor[0, row, col] = True
to_object_tensor_tuple = tuple()
if self.to_object_id is not None:
nskipped = 0
to_object_points_set = self.current_to_object_points_set()
to_object_tensor = np.zeros((1, *nrow_ncol), dtype=bool)
for point_tuple in to_object_points_set:
if point_tuple not in point_to_element_map:
nskipped += 1
continue
row, col = point_to_element_map[point_tuple]
to_object_tensor[0, row, col] = True
if nskipped == len(to_object_points_set):
raise RuntimeError(
"Skipped all to_object points in scene {}.".format(
self.environment.scene_name
)
)
elif nskipped > 10:
warnings.warn(
"Skipping many to_object points ({}) in scene {}.".format(
nskipped, self.environment.scene_name
)
)
to_object_tensor_tuple = (to_object_tensor,)
if frame_type == "egocentric-tensor":
reachable = state_tensor_per_agent[0][:1]
agent_rot_inds = []
other_agent_position_tensors = []
for agent_id, state in enumerate(state_tensor_per_agent):
agent_rot_inds.append(
round(
self.environment.get_agent_location(agent_id)["rotation"]
/ 90
)
% 4
)
other_agent_position_tensor = state[5:9]
if agent_rot_inds[-1] == 0:
order = [0, 1, 2, 3]
elif agent_rot_inds[-1] == 1:
order = [1, 2, 3, 0]
elif agent_rot_inds[-1] == 2:
order = [2, 3, 0, 1]
elif agent_rot_inds[-1] == 3:
order = [3, 0, 1, 2]
else:
raise NotImplementedError()
other_agent_position_tensors.append(
np.stack(
[other_agent_position_tensor[i] for i in order], axis=0
)
)
for agent_id in range(self.environment.num_agents):
visibility_mask = np.zeros((1, *nrow_ncol), dtype=bool)
assert (
len(
self.environment.last_event.events[agent_id].metadata[
"visibleRange"
]
)
== 26
)
visible_tuples = [
(p["x"], p["z"])
for p in self.environment.last_event.events[agent_id].metadata[
"visibleRange"
]
]
visible_hull = scipy.spatial.Delaunay(np.array(visible_tuples))
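                    # Visibility-test sketch: Delaunay.find_simplex(p) >= 0 exactly when
                    # p lies inside the convex hull of the "visibleRange" points, so this
                    # masks grid cells to (approximately) those the agent can currently see.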
for point_tuple in point_to_element_map:
if visible_hull.find_simplex(point_tuple) >= 0:
row, col = point_to_element_map[point_tuple]
visibility_mask[0, row, col] = True
tensor = np.concatenate(
(
self.tv_reachable_positions_tensor,
reachable,
visited_tensor,
visibility_mask,
other_agent_position_tensors[agent_id],
object_tensor,
)
+ to_object_tensor_tuple,
axis=0,
)
tensor *= visibility_mask
outsize = 15
padding = outsize - 1
assert outsize % 2 == 1
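                    # Crop sketch: padding the tensor by outsize - 1 on each side ensures
                    # the outsize x outsize (15 x 15) window around the agent never leaves
                    # the array; np.rot90 then orients the crop so that, presumably,
                    # "ahead" points toward the top.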
tensor = np.pad(
tensor,
[(0, 0), (padding, padding), (padding, padding)],
"constant",
constant_values=False,
)
agent_pos = self.environment.get_agent_location(agent_id=agent_id)
agent_x = round(agent_pos["x"], 2)
agent_z = round(agent_pos["z"], 2)
pos_tuple = (agent_x, agent_z)
row, col = point_to_element_map[pos_tuple]
row = row + padding
col = col + padding
half_pad = padding // 2
if agent_rot_inds[agent_id] == 0:
egocentric_tensor = tensor[
:,
(row - padding) : (row + 1),
(col - half_pad) : (col + half_pad + 1),
]
elif agent_rot_inds[agent_id] == 1:
egocentric_tensor = tensor[
:,
(row - half_pad) : (row + half_pad + 1),
col : (col + padding + 1),
]
egocentric_tensor = np.rot90(egocentric_tensor, axes=(1, 2))
elif agent_rot_inds[agent_id] == 2:
egocentric_tensor = tensor[
:,
row : (row + padding + 1),
(col - half_pad) : (col + half_pad + 1),
]
egocentric_tensor = np.rot90(
egocentric_tensor, k=2, axes=(1, 2)
)
elif agent_rot_inds[agent_id] == 3:
egocentric_tensor = tensor[
:,
(row - half_pad) : (row + half_pad + 1),
(col - padding) : (col + 1),
]
egocentric_tensor = np.rot90(
egocentric_tensor, k=3, axes=(1, 2)
)
else:
raise NotImplementedError()
frames.append(np.array(egocentric_tensor, dtype=float))
else:
state_tensor_per_agent = [
np.concatenate(
(
state,
visited_tensor,
object_tensor,
self.tv_reachable_positions_tensor,
)
+ to_object_tensor_tuple,
axis=0,
)
for state in state_tensor_per_agent
]
if frame_type in [
"allocentric-tensor",
"allocentric-tensor-no-rotations",
]:
for agent_id in range(self.environment.num_agents):
agent_pos = self.environment.get_agent_location(
agent_id=agent_id
)
agent_x = round(agent_pos["x"], 2)
agent_z = round(agent_pos["z"], 2)
pos_tuple = (agent_x, agent_z)
row, col = point_to_element_map[pos_tuple]
tensor = np.pad(
state_tensor_per_agent[agent_id],
[(0, 0), (10, 10), (10, 10)],
"constant",
constant_values=False,
)
allocentric_tensor = tensor[
:, row : (row + 21), col : (col + 21)
]
frames.append(np.array(allocentric_tensor, dtype=float))
elif frame_type == "allocentric-tensor-centered-at-tv":
object_metadata = self.environment.get_object_by_id(
object_id=self.object_id, agent_id=0
)
object_location = {
"x": object_metadata["position"]["x"],
"y": object_metadata["position"]["y"],
"z": object_metadata["position"]["z"],
"rotation": object_metadata["rotation"]["y"],
}
for agent_id in range(self.environment.num_agents):
object_x = round(object_location["x"], 2)
object_z = round(object_location["z"], 2)
pos_tuple = (object_x, object_z)
row, col = point_to_element_map[pos_tuple]
tensor = np.pad(
state_tensor_per_agent[agent_id],
[(0, 0), (10, 10), (10, 10)],
"constant",
constant_values=False,
)
allocentric_tensor = tensor[
:, row : (row + 21), col : (col + 21)
]
frames.append(np.array(allocentric_tensor, dtype=float))
else:
raise Exception("something wrong with conditions")
else:
raise Exception("Invalid frame type {}.".format(frame_type))
states = []
for agent_id in range(self.environment.num_agents):
last_action = (
None if self._last_actions is None else self._last_actions[agent_id]
)
last_action_success = (
None
if self._last_actions_success is None
else self._last_actions_success[agent_id]
)
states.append(
{
"last_action": last_action,
"last_action_success": last_action_success,
"frame": frames[agent_id],
}
)
if (
frame_type == self.frame_type
and self.return_likely_successfuly_move_actions
) or (
frame_type == self.expert_frame_type
and self.return_likely_successfuly_move_actions_for_expert
):
self.add_likely_successful_move_actions(
states=states, frame_type=frame_type
)
return states
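# FurnMoveEpisode layers the FurnMove-specific reward structure on top of the base
# multi-agent episode: a terminal bonus when the lifted object is within
# `min_dist_to_to_object` of the goal object, and a small shaping bonus whenever a new
# closest distance to the goal is achieved (see `coordinated_multi_step`).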
class FurnMoveEpisode(MultiAgentMovingWithFurnitureBaseEpisode):
def __init__(
self,
env: AI2ThorEnvironment,
task_data: Dict[str, Any],
max_steps: int,
max_distance_from_object: float,
min_dist_to_to_object: float,
include_move_obj_actions: bool = False,
reached_target_reward: float = 1.0,
moved_closer_reward: float = 0.1,
**kwargs,
):
super().__init__(
env=env,
task_data=task_data,
max_steps=max_steps,
max_distance_from_object=max_distance_from_object,
**kwargs,
)
self.include_move_obj_actions = include_move_obj_actions
self.reached_target_reward = reached_target_reward
self.moved_closer_reward = moved_closer_reward
self.min_dist_to_to_object = min_dist_to_to_object
self.closest_to_object_achieved = (
self.current_distance_between_lifted_and_goal_objects()
)
self.initial_move_obj = self.environment.get_object_by_id(
self.object_id, agent_id=0
)
self.initial_move_to_obj = self.environment.get_object_by_id(
self.to_object_id, agent_id=0
)
@property
def available_action_groups(self) -> Tuple[Tuple[str, ...], ...]:
return self.class_available_action_groups(
include_move_obj_actions=self.include_move_obj_actions
)
@classmethod
def class_available_action_groups(
cls, include_move_obj_actions: bool = False, **kwargs
) -> Tuple[Tuple[str, ...], ...]:
raise NotImplementedError
def info(self):
info = super(FurnMoveEpisode, self).info()
info["navigation/reached_target"] = self.reached_terminal_state()
info[
"navigation/final_distance"
] = self.current_distance_between_lifted_and_goal_objects()
initial_manhattan_distance = round(
abs(
self.initial_move_obj["position"]["x"]
- self.initial_move_to_obj["position"]["x"]
)
+ abs(
self.initial_move_obj["position"]["z"]
- self.initial_move_to_obj["position"]["z"]
),
2,
)
initial_manhattan_steps = round(
initial_manhattan_distance / self.environment.grid_size
)
path_length = self.num_steps_taken_in_episode() / self.environment.num_agents
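        # SPL-style metric (computed below): success is weighted by the ratio of the
        # initial Manhattan step estimate to the larger of that estimate and the
        # per-agent path length actually taken; the 0.0001 terms only guard against
        # division by zero.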
info["navigation/spl_manhattan"] = info["navigation/reached_target"] * (
(initial_manhattan_steps + 0.0001)
/ (max(initial_manhattan_steps, path_length) + 0.0001)
)
info["navigation/initial_manhattan_steps"] = initial_manhattan_steps
return info
def reached_terminal_state(self) -> bool:
return self.closest_to_object_achieved < self.min_dist_to_to_object
def next_expert_action(self) -> Tuple[Union[int, None], ...]:
# No expert supervision for exploring the environment.
expert_actions = [None] * self.environment.num_agents
return tuple(expert_actions)
def coordinated_multi_step(
self, actions_as_ints, actions_as_strings, available_actions, step_results
):
super(FurnMoveEpisode, self).coordinated_multi_step(
actions_as_ints=actions_as_ints,
actions_as_strings=actions_as_strings,
available_actions=available_actions,
step_results=step_results,
)
dist = self.current_distance_between_lifted_and_goal_objects()
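        # Reward shaping: grant `reached_target_reward` once the terminal condition is
        # met, otherwise grant `moved_closer_reward` whenever the lifted object gets
        # closer to the goal than it has ever been; the bonus is added to every agent's
        # step result.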
additional_reward = 0.0
if self.reached_terminal_state():
additional_reward += self.reached_target_reward
elif dist < self.closest_to_object_achieved:
additional_reward += self.moved_closer_reward
self.closest_to_object_achieved = min(dist, self.closest_to_object_achieved)
for sr in step_results:
sr["reward"] += additional_reward
return step_results
class FurnMoveAllocentricEpisode(FurnMoveEpisode):
def __init__(
self,
env: AI2ThorEnvironment,
task_data: Dict[str, Any],
max_steps: int,
max_distance_from_object: float,
include_move_obj_actions: bool = False,
reached_target_reward: float = 1.0,
moved_closer_reward: float = 0.1,
**kwargs,
):
assert kwargs["frame_type"] == "allocentric-tensor"
super().__init__(
env=env,
task_data=task_data,
max_steps=max_steps,
max_distance_from_object=max_distance_from_object,
include_move_obj_actions=include_move_obj_actions,
reached_target_reward=reached_target_reward,
moved_closer_reward=moved_closer_reward,
**kwargs,
)
@classmethod
def class_available_action_groups(
cls, include_move_obj_actions: bool = False, **kwargs
) -> Tuple[Tuple[str, ...], ...]:
return semiallocentric_action_groups(
include_move_obj_actions=include_move_obj_actions
)
class FurnMoveEgocentricEpisode(FurnMoveEpisode):
def __init__(
self,
env: AI2ThorEnvironment,
task_data: Dict[str, Any],
max_steps: int,
max_distance_from_object: float,
include_move_obj_actions: bool = False,
reached_target_reward: float = 1.0,
moved_closer_reward: float = 0.1,
**kwargs,
):
assert "frame_type" in kwargs
assert kwargs["frame_type"] in ["egocentric-tensor", "image"]
super().__init__(
env=env,
task_data=task_data,
max_steps=max_steps,
max_distance_from_object=max_distance_from_object,
include_move_obj_actions=include_move_obj_actions,
reached_target_reward=reached_target_reward,
moved_closer_reward=moved_closer_reward,
**kwargs,
)
if "track_time_averaged_action_pairs" in kwargs:
self.track_time_averaged_action_pairs = kwargs[
"track_time_averaged_action_pairs"
]
num_actions = len(self.available_actions)
self.action_pairs_matrix_attempted = np.zeros(
(num_actions, num_actions), dtype=float
)
self.action_pairs_matrix_successful = np.zeros(
(num_actions, num_actions), dtype=float
)
else:
self.track_time_averaged_action_pairs = False
@classmethod
def class_available_action_groups(
cls, include_move_obj_actions: bool = False, **kwargs
) -> Tuple[Tuple[str, ...], ...]:
return egocentric_action_groups(
include_move_obj_actions=include_move_obj_actions
)
def multi_step(self, actions_as_ints: Tuple[int, ...]):
step_results = super(FurnMoveEgocentricEpisode, self).multi_step(
actions_as_ints
)
if self.track_time_averaged_action_pairs:
assert self.environment.num_agents == 2
self.action_pairs_matrix_attempted[
actions_as_ints[0], actions_as_ints[1]
] += 1
self.action_pairs_matrix_successful[
actions_as_ints[0], actions_as_ints[1]
] += int(all([sr["action_success"] for sr in step_results]))
return step_results
def info(self):
info = super(FurnMoveEgocentricEpisode, self).info()
if self.track_time_averaged_action_pairs:
total_actions = np.sum(self.action_pairs_matrix_attempted)
info[
"action_pairs_matrix_attempted"
] = self.action_pairs_matrix_attempted / (total_actions + 0.0001)
info[
"action_pairs_matrix_successful"
] = self.action_pairs_matrix_successful / (total_actions + 0.0001)
return info
class FurnMoveEgocentricFastGridEpisode(FurnMoveEpisode):
def __init__(
self,
env: AI2ThorLiftedObjectGridEnvironment,
task_data: Dict[str, Any],
max_steps: int,
max_distance_from_object: float,
include_move_obj_actions: bool = False,
reached_target_reward: float = 1.0,
moved_closer_reward: float = 0.1,
**kwargs,
):
assert kwargs["frame_type"] in [
"fast-egocentric-tensor",
"fast-egocentric-relative-tensor",
]
if "visualize_test_gridworld" in kwargs:
self.visualize_test_gridworld = kwargs["visualize_test_gridworld"]
assert "visualizing_ms" in kwargs
self.visualizing_ms = kwargs["visualizing_ms"]
else:
self.visualize_test_gridworld = False
self.visualizing_ms = None
super().__init__(
env=env,
task_data=task_data,
max_steps=max_steps,
max_distance_from_object=max_distance_from_object,
include_move_obj_actions=include_move_obj_actions,
reached_target_reward=reached_target_reward,
moved_closer_reward=moved_closer_reward,
**kwargs,
)
if "track_time_averaged_action_pairs" in kwargs:
self.track_time_averaged_action_pairs = kwargs[
"track_time_averaged_action_pairs"
]
num_actions = len(self.available_actions)
self.action_pairs_matrix_attempted = np.zeros(
(num_actions, num_actions), dtype=float
)
self.action_pairs_matrix_successful = np.zeros(
(num_actions, num_actions), dtype=float
)
else:
self.track_time_averaged_action_pairs = False
@classmethod
def class_available_action_groups(
cls, include_move_obj_actions: bool = False, **kwargs
) -> Tuple[Tuple[str, ...], ...]:
return egocentric_action_groups(
include_move_obj_actions=include_move_obj_actions
)
@property
def tv_reachable_positions_set(self):
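        # Lazily computed and cached: the set of (x, z) positions from which the lifted
        # object (TV) is reachable, obtained once from the environment via the
        # "GetReachablePositionsForObject" action and reused on subsequent calls.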
if self._tv_reachable_positions_set is not None:
return self._tv_reachable_positions_set
self.environment.step(
{
"action": "GetReachablePositionsForObject",
"objectId": self.object_id,
"agentId": 0,
}
)
self._tv_reachable_positions_set = set(
(round(pos["x"], 2), round(pos["z"], 2))
for pos in itertools.chain.from_iterable(
self.environment.last_event.metadata["actionReturn"].values()
)
)
return self._tv_reachable_positions_set
def states_for_agents(self, frame_type: Optional[str] = None):
frames = []
if frame_type is None:
frame_type = self.frame_type
if frame_type in ["fast-egocentric-tensor", "fast-egocentric-relative-tensor"]:
# Reachable, unreachable and agent locations are marked
# Since it's the same cost, may as well set
# use_initially_reachable_points_matrix to False
(
state_tensor_per_agent,
point_to_element_map,
) = self.environment.get_current_multi_agent_occupancy_tensors(
use_initially_reachable_points_matrix=False
)
nrow_ncol = state_tensor_per_agent[0].shape[-2:]
# Mark visited locations
nskipped = 0
visited_tensor = np.zeros((1, *nrow_ncol), dtype=bool)
for point_tuple in self.visited_xz:
if point_tuple not in point_to_element_map:
nskipped += 1
continue
row, col = point_to_element_map[point_tuple]
visited_tensor[0, row, col] = True
if nskipped > 10:
warnings.warn(
"Skipping many object's visited points ({}) in scene {}.".format(
nskipped, self.environment.scene_name
)
)
# Mark goal object locations
nskipped = 0
object_points_set = self.current_object_points_set()
object_tensor = np.zeros((1, *nrow_ncol), dtype=bool)
for point_tuple in object_points_set:
if point_tuple not in point_to_element_map:
nskipped += 1
continue
row, col = point_to_element_map[point_tuple]
object_tensor[0, row, col] = True
if nskipped == len(object_points_set):
raise RuntimeError(
"Skipped all object points in scene {}.".format(
self.environment.scene_name
)
)
elif nskipped > 10:
warnings.warn(
"Skipping many object points ({}) in scene {}.".format(
nskipped, self.environment.scene_name
)
)
# TODO: This can be replaced with rotation_to_lifted_object_reachable_position_masks
if self.tv_reachable_positions_tensor is None:
self.tv_reachable_positions_tensor = np.zeros(
(1, *nrow_ncol), dtype=bool
)
nskipped = 0
for point_tuple in self.tv_reachable_positions_set:
if point_tuple not in point_to_element_map:
nskipped += 1
continue
row, col = point_to_element_map[point_tuple]
self.tv_reachable_positions_tensor[0, row, col] = True
to_object_tensor_tuple = tuple()
if self.to_object_id is not None:
nskipped = 0
to_object_points_set = self.current_to_object_points_set()
to_object_tensor = np.zeros((1, *nrow_ncol), dtype=bool)
for point_tuple in to_object_points_set:
if point_tuple not in point_to_element_map:
nskipped += 1
continue
row, col = point_to_element_map[point_tuple]
to_object_tensor[0, row, col] = True
if nskipped == len(to_object_points_set):
raise RuntimeError(
"Skipped all to_object points in scene {}.".format(
self.environment.scene_name
)
)
elif nskipped > 10:
warnings.warn(
"Skipping many to_object points ({}) in scene {}.".format(
nskipped, self.environment.scene_name
)
)
to_object_tensor_tuple = (to_object_tensor,)
if frame_type == "fast-egocentric-tensor":
output_tensor_per_agent = [
np.concatenate(
(
state,
visited_tensor,
object_tensor,
self.tv_reachable_positions_tensor,
)
+ to_object_tensor_tuple,
axis=0,
)
for state in state_tensor_per_agent
]
elif frame_type == "fast-egocentric-relative-tensor":
reachable = state_tensor_per_agent[0][:1]
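                # In the relative variant, channels [5:] of each agent's state hold the
                # other agents' position masks, one channel per 90-degree rotation (four
                # per agent). The permutation below reorders those rotation channels so
                # they are expressed relative to the viewing agent's own heading.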
agent_rot_inds = []
other_agent_position_tensors_list = []
for agent_id, state in enumerate(state_tensor_per_agent):
agent_rot_inds.append(
round(
self.environment.get_agent_location(agent_id)["rotation"]
/ 90
)
% 4
)
other_agent_positions = state[5:]
if agent_rot_inds[-1] == 0:
order = [0, 1, 2, 3]
elif agent_rot_inds[-1] == 1:
order = [1, 2, 3, 0]
elif agent_rot_inds[-1] == 2:
order = [2, 3, 0, 1]
elif agent_rot_inds[-1] == 3:
order = [3, 0, 1, 2]
else:
raise NotImplementedError()
other_agent_position_tensors_list.append(
other_agent_positions[
sum(
[
[o + 4 * i for o in order]
for i in range(self.environment.num_agents - 1)
],
[],
)
]
)
output_tensor_per_agent = [
np.concatenate(
(
self.tv_reachable_positions_tensor,
reachable,
visited_tensor,
other_agent_position_tensors_list[agent_id],
object_tensor,
)
+ to_object_tensor_tuple,
axis=0,
)
for agent_id in range(self.environment.num_agents)
]
else:
raise Exception("something wrong with conditions, check!")
for agent_id in range(self.environment.num_agents):
agent_pos = self.environment.get_agent_location(agent_id=agent_id)
agent_x = round(agent_pos["x"], 2)
agent_z = round(agent_pos["z"], 2)
pos_tuple = (agent_x, agent_z)
row, col = point_to_element_map[pos_tuple]
agent_location_in_mask = self.environment.get_agent_location_in_mask(
agent_id=agent_id
)
                if row != agent_location_in_mask["row"]:
print(
"row: {} | agent_location_in_mask[row]: {}".format(
row, agent_location_in_mask["row"]
)
)
                if col != agent_location_in_mask["col"]:
print(
"col: {} | agent_location_in_mask[col]: {}".format(
col, agent_location_in_mask["col"]
)
)
rot = round(agent_location_in_mask["rot"] / 90) % 4
outsize = 15
padding = outsize - 1
assert outsize % 2 == 1
tensor = np.pad(
output_tensor_per_agent[agent_id],
[(0, 0), (padding, padding), (padding, padding)],
"constant",
constant_values=False,
)
row = row + padding
col = col + padding
half_pad = padding // 2
if rot == 0:
egocentric_tensor = tensor[
:,
(row - padding) : (row + 1),
(col - half_pad) : (col + half_pad + 1),
]
elif rot == 1:
egocentric_tensor = tensor[
:,
(row - half_pad) : (row + half_pad + 1),
col : (col + padding + 1),
]
egocentric_tensor = np.rot90(egocentric_tensor, axes=(1, 2))
elif rot == 2:
egocentric_tensor = tensor[
:,
row : (row + padding + 1),
(col - half_pad) : (col + half_pad + 1),
]
egocentric_tensor = np.rot90(egocentric_tensor, k=2, axes=(1, 2))
elif rot == 3:
egocentric_tensor = tensor[
:,
(row - half_pad) : (row + half_pad + 1),
(col - padding) : (col + 1),
]
egocentric_tensor = np.rot90(egocentric_tensor, k=3, axes=(1, 2))
else:
raise NotImplementedError()
# TODO: See if this copy is needed?
frames.append(np.array(egocentric_tensor, dtype=float))
else:
raise Exception("Invalid frame type {}.".format(frame_type))
states = []
for agent_id in range(self.environment.num_agents):
last_action = (
None if self._last_actions is None else self._last_actions[agent_id]
)
last_action_success = (
None
if self._last_actions_success is None
else self._last_actions_success[agent_id]
)
states.append(
{
"last_action": last_action,
"last_action_success": last_action_success,
"frame": frames[agent_id],
}
)
if (
frame_type == self.frame_type
and self.return_likely_successfuly_move_actions
) or (
frame_type == self.expert_frame_type
and self.return_likely_successfuly_move_actions_for_expert
):
self.add_likely_successful_move_actions(
states=states, frame_type=frame_type
)
return states
def multi_step(self, actions_as_ints: Tuple[int, ...]):
step_results = super(FurnMoveEgocentricFastGridEpisode, self).multi_step(
actions_as_ints
)
if self.visualize_test_gridworld:
self.environment.visualize(self.visualizing_ms)
if self.track_time_averaged_action_pairs:
assert self.environment.num_agents == 2
self.action_pairs_matrix_attempted[
actions_as_ints[0], actions_as_ints[1]
] += 1
self.action_pairs_matrix_successful[
actions_as_ints[0], actions_as_ints[1]
] += int(all([sr["action_success"] for sr in step_results]))
return step_results
def info(self):
info = super(FurnMoveEgocentricFastGridEpisode, self).info()
if self.track_time_averaged_action_pairs:
total_actions = np.sum(self.action_pairs_matrix_attempted)
info[
"action_pairs_matrix_attempted"
] = self.action_pairs_matrix_attempted / (total_actions + 0.0001)
info[
"action_pairs_matrix_successful"
] = self.action_pairs_matrix_successful / (total_actions + 0.0001)
return info
class FurnMoveEgocentricNoRotationsFastGridEpisode(FurnMoveEgocentricFastGridEpisode):
def __init__(
self,
env: AI2ThorLiftedObjectGridEnvironment,
task_data: Dict[str, Any],
max_steps: int,
max_distance_from_object: float,
include_move_obj_actions: bool = False,
reached_target_reward: float = 1.0,
moved_closer_reward: float = 0.1,
**kwargs,
):
super().__init__(
env=env,
task_data=task_data,
max_steps=max_steps,
max_distance_from_object=max_distance_from_object,
include_move_obj_actions=include_move_obj_actions,
reached_target_reward=reached_target_reward,
moved_closer_reward=moved_closer_reward,
**kwargs,
)
@classmethod
def class_available_action_groups(
cls, include_move_obj_actions: bool = False, **kwargs
) -> Tuple[Tuple[str, ...], ...]:
return egocentric_no_rotate_action_groups(
include_move_obj_actions=include_move_obj_actions
)
class FurnMoveAllocentricTVCenteredEpisode(FurnMoveEpisode):
def __init__(
self,
env: AI2ThorEnvironment,
task_data: Dict[str, Any],
max_steps: int,
max_distance_from_object: float,
include_move_obj_actions: bool = False,
reached_target_reward: float = 1.0,
moved_closer_reward: float = 0.1,
**kwargs,
):
assert "frame_type" not in kwargs
kwargs["frame_type"] = "allocentric-tensor-centered-at-tv"
super().__init__(
env=env,
task_data=task_data,
max_steps=max_steps,
max_distance_from_object=max_distance_from_object,
include_move_obj_actions=include_move_obj_actions,
reached_target_reward=reached_target_reward,
moved_closer_reward=moved_closer_reward,
**kwargs,
)
@classmethod
def class_available_action_groups(
cls, include_move_obj_actions: bool = False, **kwargs
) -> Tuple[Tuple[str, ...], ...]:
return semiallocentric_action_groups(
include_move_obj_actions=include_move_obj_actions
)
| cordial-sync-master | rl_multi_agent/furnmove_episodes.py |
from __future__ import division
import sys
import traceback
import warnings
from typing import Optional, Dict
import networkx
import torch
import torch.multiprocessing as mp
import torch.optim as optim
from setproctitle import setproctitle as ptitle
from torch import nn
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.multi_agent_utils import (
TrainingCompleteException,
EndProcessException,
compute_losses_and_backprop,
)
from utils.net_util import recursively_detach
warnings.simplefilter("always", UserWarning)
def train(
worker_number: int,
args,
shared_model: nn.Module,
experiment: ExperimentConfig,
optimizer: optim.Optimizer,
res_queue: mp.Queue,
end_flag: mp.Value,
train_total_ep: mp.Value,
update_lock: Optional[mp.Lock] = None,
save_data_queue: Optional[mp.Queue] = None,
episode_init_queue: Optional[mp.Queue] = None,
) -> None:
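    # One asynchronous (Hogwild-style) training worker: it repeatedly syncs its local
    # model with the shared model, rolls out an episode created by
    # `experiment.init_train_agent`, steps the agent for up to `args.num_steps` actions
    # at a time while computing losses/updates via `compute_losses_and_backprop`, and
    # pushes per-episode info onto `res_queue` for the main process to log.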
ptitle("Training Agent: {}".format(worker_number))
experiment.init_train_agent.worker_number = worker_number
gpu_id = args.gpu_ids[worker_number % len(args.gpu_ids)]
torch.manual_seed(args.seed + worker_number)
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
torch.cuda.manual_seed(args.seed + worker_number)
model: nn.Module = experiment.create_model()
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
model.load_state_dict(shared_model.state_dict())
else:
model.load_state_dict(shared_model.state_dict())
agent: MultiAgent = experiment.create_agent(model=model, gpu_id=gpu_id)
num_sequential_errors = 0
while not end_flag.value:
last_losses: Dict[str, float] = {}
additional_metrics = {}
try:
experiment.init_train_agent.current_train_episode = train_total_ep.value
agent.global_episode_count = train_total_ep.value
agent.sync_with_shared(shared_model)
while True:
agent_iterator = experiment.init_train_agent(
agent=agent, episode_init_queue=episode_init_queue
)
try:
next(agent_iterator)
break
except StopIteration as e:
warnings.warn("Agent iterator was empty.")
additional_metrics = {}
step_results = []
while not agent.episode.is_complete():
agent.global_episode_count = train_total_ep.value
for step in range(args.num_steps):
agent.act(train=True)
if args.skip_backprop:
agent.repackage_hidden()
if len(agent.eval_results) != 0:
agent.eval_results[-1] = recursively_detach(
agent.eval_results[-1]
)
if agent.episode.is_complete():
break
if end_flag.value:
raise EndProcessException()
last_losses = compute_losses_and_backprop(
agent=agent,
shared_model=shared_model,
optimizer=optimizer,
update_lock=update_lock,
gpu=gpu_id >= 0,
retain_graph=False,
skip_backprop=args.skip_backprop,
)
if (
len(agent.eval_results) > 0
and "additional_metrics" in agent.eval_results[0]
):
for a_metric in agent.eval_results[0]["additional_metrics"]:
if a_metric not in additional_metrics:
additional_metrics[a_metric] = []
additional_metrics[a_metric].extend(
er["additional_metrics"][a_metric]
for er in agent.eval_results
)
if save_data_queue is not None:
step_results.extend(agent.step_results)
if (not agent.episode.is_complete()) or (not args.skip_backprop):
agent.clear_history()
agent.repackage_hidden()
agent.sync_with_shared(shared_model)
additional_metrics = {
k: float(sum(additional_metrics[k]) / len(additional_metrics[k]))
for k in additional_metrics
if len(additional_metrics[k]) != 0
}
try:
next(agent_iterator)
raise Exception("Agent iterator should only yield once.")
except StopIteration as _:
pass
info = agent.episode.info()
if save_data_queue is not None:
data_to_save = experiment.create_episode_summary(
agent,
additional_metrics=additional_metrics,
step_results=step_results,
)
if data_to_save is not None:
save_data_queue.put(data_to_save)
if last_losses is not None:
info = {**info, **last_losses}
if additional_metrics:
info = {**info, **additional_metrics}
res_queue.put({k: info[k] for k in info})
agent.clear_history()
agent.repackage_hidden()
num_sequential_errors = 0
except EndProcessException as _:
print(
"End process signal received in worker {}. Quitting...".format(
worker_number
),
flush=True,
)
sys.exit()
except TrainingCompleteException as _:
print(
(
"Training complete signal received in worker {}. "
"Closing open files/subprocesses exiting."
).format(worker_number),
flush=True,
)
try:
agent.environment.stop()
except Exception as _:
pass
sys.exit()
except (RuntimeError, ValueError, networkx.exception.NetworkXNoPath) as e:
num_sequential_errors += 1
if num_sequential_errors == 10:
print("Too many sequential errors in training.")
raise e
if "out of memory at" in str(e):
raise e
print(
"RuntimeError, ValueError, or networkx exception when"
" training, attempting to print traceback, print metadata, and reset."
)
print("-" * 60)
traceback.print_exc(file=sys.stdout)
print("-" * 60)
try:
print(agent.environment.last_event.metadata)
except NameError:
print("Unable to print environment metadata.")
print("-" * 60)
agent.clear_history()
agent.repackage_hidden()
| cordial-sync-master | rl_multi_agent/train_process_runner.py |
from __future__ import print_function, division
import collections
import copy
import ctypes
import importlib
import os
import queue
import random
import signal
import sys
import time
import warnings
from typing import Optional
import numpy as np
import torch
import torch.multiprocessing as mp
from setproctitle import setproctitle as ptitle
from tensorboardX import SummaryWriter
from torch import nn
from rl_base.shared_optim import SharedRMSprop, SharedAdam
from rl_multi_agent.test_process_runner import test
from rl_multi_agent.train_process_runner import train
from utils import flag_parser
from utils.misc_util import save_project_state_in_log
from utils.net_util import (
ScalarMeanTracker,
load_model_from_state_dict,
TensorConcatTracker,
)
np.set_printoptions(threshold=10000000, suppress=True, linewidth=100000)
os.environ["OMP_NUM_THREADS"] = "1"
# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training entry point and settings.
# We experimented with lock-based multiprocessing updates, but they were not
# beneficial; lock-free (Hogwild!) training was far superior.
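# Rough flow of this entry point (summary comment, no new behavior): the task name
# (args.task) is mapped to an experiment config module
# ("{experiment_dir}.{task}_config") that provides get_experiment(); a shared
# model/optimizer pair is built (optionally warm-started from
# experiment.saved_model_path); args.workers training processes plus an optional
# validation process are spawned; and the loop below aggregates their results for
# TensorBoard logging and periodic checkpointing.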
if __name__ == "__main__":
ptitle("Train/Test Manager")
args = flag_parser.parse_arguments()
task = args.task
task = task.replace("-", "_")
if task[-7:] == "_config":
task = task[:-7]
if task[-10:] == "_config.py":
task = task[:-10]
module = importlib.import_module(
"{}.{}_config".format(
args.experiment_dir.replace("/", "."), task.replace("-", "_")
)
)
experiment = module.get_experiment()
experiment.init_train_agent.env_args = args
experiment.init_test_agent.env_args = args
start_time = time.time()
local_start_time_str = time.strftime(
"%Y-%m-%d_%H-%M-%S", time.localtime(start_time)
)
if args.enable_logging:
# Caching current state of the project
log_file_path = save_project_state_in_log(
sys.argv,
task + ("" if args.tag == "" else "/{}".format(args.tag)),
local_start_time_str,
None
if experiment.saved_model_path is None
else (experiment.saved_model_path,),
args.log_dir,
)
# Create a tensorboard logger
log_writer = SummaryWriter(log_file_path)
for arg in vars(args):
log_writer.add_text("call/" + arg, str(getattr(args, arg)), 0)
s = ""
for arg in vars(args):
s += "--" + arg + " "
s += str(getattr(args, arg)) + " "
log_writer.add_text("call/full", s, 0)
log_writer.add_text("log-path/", log_file_path, 0)
if experiment.saved_model_path is not None:
log_writer.add_text("model-load-path", experiment.saved_model_path, 0)
# Seed (hopefully) all sources of randomness
np.random.seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
if args.gpu_ids == -1:
args.gpu_ids = [-1]
else:
torch.cuda.manual_seed(args.seed)
mp = mp.get_context("spawn")
if any(gpu_id >= 0 for gpu_id in args.gpu_ids):
assert torch.cuda.is_available(), (
f"You have specified gpu_ids=={args.gpu_ids} but no GPUs are available."
" Please check that your machine has GPUs installed and that the correct (GPU compatible)"
" version of torch has been installed."
)
shared_model: nn.Module = experiment.create_model()
optimizer_state = None
restarted_from_episode = None
if experiment.saved_model_path is not None:
path = experiment.saved_model_path
saved_state = torch.load(path, map_location=lambda storage, loc: storage)
print("\nLoading pretrained weights from {}...".format(path))
if "model_state" in saved_state:
load_model_from_state_dict(shared_model, saved_state["model_state"])
optimizer_state = saved_state["optimizer_state"]
restarted_from_episode = saved_state["episodes"]
else:
load_model_from_state_dict(shared_model, saved_state)
print("Done.")
shared_model.share_memory()
pytorch_total_params = sum(p.numel() for p in shared_model.parameters())
pytorch_total_trainable_params = sum(
p.numel() for p in shared_model.parameters() if p.requires_grad
)
print("pytorch_total_params:" + str(pytorch_total_params))
print("pytorch_total_trainable_params:" + str(pytorch_total_trainable_params))
if not args.shared_optimizer:
raise NotImplementedError("Must use shared optimizer.")
optimizer: Optional[torch.optim.Optimizer] = None
if args.shared_optimizer:
if args.optimizer == "RMSprop":
optimizer = SharedRMSprop(
filter(lambda param: param.requires_grad, shared_model.parameters()),
lr=args.lr,
saved_state=optimizer_state,
)
elif args.optimizer == "Adam":
optimizer = SharedAdam(
filter(lambda param: param.requires_grad, shared_model.parameters()),
lr=args.lr,
amsgrad=args.amsgrad,
saved_state=optimizer_state,
)
else:
raise NotImplementedError(
"Must choose a shared optimizer from 'RMSprop' or 'Adam'."
)
processes = []
end_flag = mp.Value(ctypes.c_bool, False)
train_scalars = ScalarMeanTracker()
train_tensors = TensorConcatTracker()
train_total_ep = mp.Value(
ctypes.c_int32, restarted_from_episode if restarted_from_episode else 0
)
train_res_queue = mp.Queue()
assert (
args.x_display is None or args.x_displays is None
), "One of x_display ({}) and x_displays ({}) must not have a value.".format(
args.x_display, args.x_displays
)
save_data_queue = None if not args.save_extra_data else mp.Queue()
episode_init_queue = (
None
if not args.use_episode_init_queue
else experiment.create_episode_init_queue(mp_module=mp)
)
if experiment.stopping_criteria_reached():
warnings.warn("Stopping criteria reached before any computations started!")
print("All done.")
sys.exit()
for rank in range(0, args.workers):
train_experiment = copy.deepcopy(experiment)
train_experiment.init_train_agent.seed = random.randint(0, 10 ** 10)
if args.x_displays is not None:
args = copy.deepcopy(args)
args.x_display = args.x_displays[rank % len(args.x_displays)]
train_experiment.init_train_agent.env_args = args
train_experiment.init_test_agent.env_args = args
p = mp.Process(
target=train,
args=(
rank,
args,
shared_model,
train_experiment,
optimizer,
train_res_queue,
end_flag,
train_total_ep,
None, # Update lock
save_data_queue,
episode_init_queue,
),
)
p.start()
processes.append(p)
time.sleep(0.2)
time.sleep(5)
valid_res_queue = mp.Queue()
valid_total_ep = mp.Value(
ctypes.c_int32, restarted_from_episode if restarted_from_episode else 0
)
if args.enable_val_agent:
test_experiment = copy.deepcopy(experiment)
if args.x_displays is not None:
args = copy.deepcopy(args)
args.x_display = args.x_displays[-1]
test_experiment.init_train_agent.env_args = args
test_experiment.init_test_agent.env_args = args
p = mp.Process(
target=test,
args=(args, shared_model, test_experiment, valid_res_queue, end_flag, 0, 1),
)
p.start()
processes.append(p)
time.sleep(0.2)
time.sleep(1)
train_thin = 500
valid_thin = 1
n_frames = 0
try:
while (
(not experiment.stopping_criteria_reached())
and any(p.is_alive() for p in processes)
and train_total_ep.value < args.max_ep
):
try:
train_result = train_res_queue.get(timeout=10)
if len(train_result) != 0:
train_scalars.add_scalars(train_result)
train_tensors.add_tensors(train_result)
ep_length = sum(
train_result[k] for k in train_result if "ep_length" in k
)
train_total_ep.value += 1
n_frames += ep_length
if args.enable_logging and train_total_ep.value % train_thin == 0:
tracked_means = train_scalars.pop_and_reset()
for k in tracked_means:
log_writer.add_scalar(
k + "/train", tracked_means[k], train_total_ep.value
)
if train_total_ep.value % (20 * train_thin) == 0:
tracked_tensors = train_tensors.pop_and_reset()
for k in tracked_tensors:
log_writer.add_histogram(
k + "/train",
tracked_tensors[k],
train_total_ep.value,
)
                    if args.enable_logging and train_total_ep.value % (10 * train_thin) == 0:
log_writer.add_scalar(
"n_frames", n_frames, train_total_ep.value
)
if args.enable_logging and args.enable_val_agent:
while not valid_res_queue.empty():
valid_result = valid_res_queue.get()
if len(valid_result) == 0:
continue
key = list(valid_result.keys())[0].split("/")[0]
valid_total_ep.value += 1
if valid_total_ep.value % valid_thin == 0:
for k in valid_result:
if np.isscalar(valid_result[k]):
log_writer.add_scalar(
k + "/valid",
valid_result[k],
train_total_ep.value,
)
elif isinstance(valid_result[k], collections.Iterable):
log_writer.add_histogram(
k + "/train",
valid_result[k],
train_total_ep.value,
)
# Saving extra data (if any)
if save_data_queue is not None:
while True:
try:
experiment.save_episode_summary(
data_to_save=save_data_queue.get(timeout=0.2)
)
except queue.Empty as _:
break
# Checkpoints
if (
train_total_ep.value == args.max_ep
or (train_total_ep.value % args.save_freq) == 0
):
if not os.path.exists(args.save_model_dir):
os.makedirs(args.save_model_dir)
state_to_save = shared_model.state_dict()
save_path = os.path.join(
args.save_model_dir,
"{}_{}_{}.dat".format(
task, train_total_ep.value, local_start_time_str
),
)
torch.save(
{
"model_state": shared_model.state_dict(),
"optimizer_state": optimizer.state_dict(),
"episodes": train_total_ep.value,
},
save_path,
)
except queue.Empty as _:
pass
finally:
end_flag.value = True
print(
"Stopping criteria reached: {}".format(
experiment.stopping_criteria_reached()
),
flush=True,
)
print(
"Any workers still alive: {}".format(any(p.is_alive() for p in processes)),
flush=True,
)
print(
"Reached max episodes: {}".format(train_total_ep.value >= args.max_ep),
flush=True,
)
if args.enable_logging:
log_writer.close()
for p in processes:
p.join(0.1)
if p.is_alive():
os.kill(p.pid, signal.SIGTERM)
print("All done.", flush=True)
| cordial-sync-master | rl_multi_agent/main.py |
"""
This script can be used to reproduce the training curves presented in our paper assuming you
have followed the directions in our README.md to download the `data` directory. The plots will
be saved in the `PATH/TO/THIS/PROJECT/analysis_output/plots` directory.
"""
import json
import os
import sys
import traceback
import warnings
import matplotlib.pyplot as plt
import numpy as np
from skmisc.loess import loess
from constants import ABS_PATH_TO_ANALYSIS_RESULTS_DIR, ABS_PATH_TO_DATA_DIR
from utils.misc_util import unzip
DIR_TO_SAVE_PROCESSED_OUTPUT = os.path.join(
ABS_PATH_TO_DATA_DIR, "furnmove_and_furnlift_logs_processed_to_jsons/"
)
os.makedirs(DIR_TO_SAVE_PROCESSED_OUTPUT, exist_ok=True)
# Vision
VISION_ID_TO_NAME = {
"vision_bigcentral_cl_rot": "Central",
"vision_mixture_cl_rot": "SYNC",
"vision_marginal_nocl_rot": "Marginal",
"vision_marginalnocomm_nocl_rot": "Marginal (w/o comm)",
}
# Grid
GRID_ID_TO_NAME = {
"grid_bigcentral_cl_rot": "Central",
"grid_mixture_cl_rot": "SYNC",
"grid_marginal_nocl_rot": "Marginal",
"grid_marginalnocomm_nocl_rot": "Marginal (w/o comm)",
}
# FurnLift vision noimplicit
FURNLIFT_VISION_TO_NAME = {
"furnlift__vision_noimplicit_central_cl": "Central",
"furnlift__vision_noimplicit_mixture_cl": "SYNC",
"furnlift__vision_noimplicit_marginal_nocl": "Marginal",
"furnlift__vision_noimplicit_marginalnocomm_nocl": "Marginal (w/o comm)",
}
METRIC_ID_TO_NAME = {
"reached_target": "Reached Target Successfully",
"ep_length": "Episode Length",
"accuracy": "Find and Lift Success",
"spl_manhattan": "MD-SPL",
"pickupable_but_not_picked": "Pickupable But Not Picked",
"picked_but_not_pickupable": "Picked But Not Pickupable",
"invalid_prob_mass": "Invalid Probability Mass",
"dist_to_low_rank": "TVD",
"reward": "Reward",
}
HEAD_NAME_TO_COLOR = {
"Central": "red",
"SYNC": "blue",
"Marginal": "green",
"Marginal (w/o comm)": "black",
}
def plot_metric_for_saved_logs(id_to_name, metric_id, max_step, max_points=50000):
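    # For each method id, this loads the processed log JSON (train/valid curves), thins
    # the points to at most `max_points` at or below `max_step`, LOESS-smooths them
    # (span=0.1, degree=1), and plots the smoothed curve (solid for train, dashed for
    # valid) with a 95% confidence band in the method's color.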
for id in id_to_name:
file_name = "furnmove_{}_info.json".format(id)
try:
with open(os.path.join(DIR_TO_SAVE_PROCESSED_OUTPUT, file_name), "r") as f:
tb_data = json.load(f)
span = 0.1
except Exception as _:
file_name = "{}_info.json".format(id)
span = 0.1
try:
with open(
os.path.join(DIR_TO_SAVE_PROCESSED_OUTPUT, file_name), "r"
) as f:
tb_data = json.load(f)
except Exception as _:
raise RuntimeError("Could not find json log file with id {}".format(id))
name = id_to_name[id]
color = HEAD_NAME_TO_COLOR[name]
for split in ["train", "valid"]:
print(split)
is_train = split == "train"
tb_split_data = tb_data[split]
if metric_id not in tb_split_data:
warnings.warn(
"{} does not exist for metric {} and method {}".format(
split, METRIC_ID_TO_NAME[metric_id], name
)
)
continue
x, y = unzip(tb_split_data[metric_id])
x = np.array(x)
inds = np.arange(0, len(x) - 1, max(len(x) // max_points, 1))
inds = inds[x[inds] <= max_step]
x, y = np.array(x)[inds], np.array(y)[inds]
x = x / 1000
print("Fitting loess")
l = loess(x, y, span=span, degree=1)
l.fit()
pred = l.predict(x, stderror=True)
conf = pred.confidence(alpha=0.05)
y_pred = (
pred.values if metric_id == "reward" else np.maximum(pred.values, 0)
)
ll = conf.lower if metric_id == "reward" else np.maximum(conf.lower, 0)
ul = conf.upper
print("Plotting line")
plt.plot(
x,
y_pred,
color=color,
dashes=[] if is_train else [10, 5], # [5, 5, 5, 5],
linewidth=0.75 if is_train else 0.5,
alpha=1,
label=name if is_train else None,
)
print("Plotting fill")
plt.fill_between(
x,
ll,
ul,
alpha=0.10 * (2 if is_train else 1),
color=color,
linewidth=0.0,
)
plt.ylabel(METRIC_ID_TO_NAME[metric_id])
plt.xlabel("Training Episodes (Thousands)")
ax = plt.gca()
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ymin, ymax = ax.get_ylim()
if ymax < 1 and "spl" not in metric_id:
plt.scatter([1], [1], alpha=0)
# ax.spines["left"].set_smart_bounds(True)
# ax.spines["bottom"].set_smart_bounds(True)
return ax
if __name__ == "__main__":
os.makedirs(os.path.join(ABS_PATH_TO_ANALYSIS_RESULTS_DIR, "plots"), exist_ok=True)
figsize = (4, 3)
# Do you wish to overwrite existing plots?
overwrite = True
# Select tasks for which metrics are to be plotted.
task_interested_in = ["furnlift_vision", "vision", "grid"]
for type in task_interested_in:
for metric_id in (
[
"reached_target",
"ep_length",
"spl_manhattan",
"invalid_prob_mass",
"dist_to_low_rank",
"reward",
]
if type != "furnlift_vision"
else [
"pickupable_but_not_picked",
"picked_but_not_pickupable",
"reward",
"ep_length",
"accuracy",
"invalid_prob_mass",
"dist_to_low_rank",
]
):
plot_save_path = os.path.join(
ABS_PATH_TO_ANALYSIS_RESULTS_DIR, "plots/{}__{}.pdf"
).format(type, metric_id)
if (not overwrite) and os.path.exists(plot_save_path):
continue
print("\n" * 3)
print(metric_id, type)
print("\n")
if type == "vision":
id_to_name = VISION_ID_TO_NAME
max_step = 500000
elif type == "grid":
id_to_name = GRID_ID_TO_NAME
max_step = 1000000
elif type == "furnlift_vision":
id_to_name = FURNLIFT_VISION_TO_NAME
max_step = 100000
else:
raise NotImplementedError
plt.figure(figsize=figsize)
try:
plot_metric_for_saved_logs(
id_to_name=id_to_name,
metric_id=metric_id,
max_step=max_step,
max_points=20000,
)
if any(k in metric_id for k in ["reached", "accuracy"]):
plt.legend(loc="best")
plt.savefig(plot_save_path, bbox_inches="tight")
except Exception as e:
traceback.print_exc(file=sys.stdout)
print("Continuing")
finally:
plt.close()
| cordial-sync-master | rl_multi_agent/analysis/visualize_furnmove_and_furnlift_log_jsons.py |
"""
This script generates joint policy summaries presented in the paper assuming you
have followed the directions in our README.md to download the `data` directory. The plots will
be saved in the `PATH/TO/THIS/PROJECT/analysis_output/plots/ATTEMPT_OR_SUCCESS` directory.
The `ATTEMPT_OR_SUCCESS` flag selects whether attempted or successful actions are
visualized (note that in the paper we show `attempt` joint policy summaries).
"""
import glob
import json
import os
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import patches
from constants import ABS_PATH_TO_ANALYSIS_RESULTS_DIR, ABS_PATH_TO_DATA_DIR
def make_colormap(seq):
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {"red": [], "green": [], "blue": []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict["red"].append([item, r1, r2])
cdict["green"].append([item, g1, g2])
cdict["blue"].append([item, b1, b2])
return mcolors.LinearSegmentedColormap("CustomMap", cdict)
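# Example usage (as done in __main__ below): build a white-to-red colormap for the
# joint-policy heatmaps.
#   c = mcolors.ColorConverter().to_rgb
#   wr = make_colormap([c("white"), c("red")])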
if __name__ == "__main__":
split = "test"
id_to_time_averaged_action_mat = {}
id_to_time_averaged_success_action_mat = {}
for dir in glob.glob(
os.path.join(ABS_PATH_TO_DATA_DIR, "furnmove_evaluations__{}/*").format(split)
):
dir_name = os.path.basename(dir)
id = dir_name.split("__")[0]
if "3agents" in id:
continue
print()
print(id)
time_averaged_success_action_mats = []
time_averaged_action_mats = []
for i, p in enumerate(glob.glob(os.path.join(dir, "*.json"))):
if i % 100 == 0:
print(i)
with open(p, "r") as f:
result_dict = json.load(f)
time_averaged_success_action_mats.append(
np.array(result_dict["action_taken_success_matrix"])
)
time_averaged_success_action_mats[-1] = time_averaged_success_action_mats[
-1
] / (time_averaged_success_action_mats[-1].sum() + 1e-6)
time_averaged_action_mats.append(
np.array(result_dict["action_taken_matrix"])
)
time_averaged_action_mats[-1] = time_averaged_action_mats[-1] / (
time_averaged_action_mats[-1].sum() + 1e-6
)
id_to_time_averaged_success_action_mat[id] = np.stack(
time_averaged_success_action_mats, axis=0
).mean(0)
id_to_time_averaged_action_mat[id] = np.stack(
time_averaged_action_mats, axis=0
).mean(0)
c = mcolors.ColorConverter().to_rgb
wr = make_colormap([c("white"), c("red")])
success_or_attempt = "attempt"
if success_or_attempt == "attempt":
id_to_mat = id_to_time_averaged_action_mat
else:
id_to_mat = id_to_time_averaged_success_action_mat
dir = os.path.join(
ABS_PATH_TO_ANALYSIS_RESULTS_DIR, "plots/average_policy_mats/{}"
).format(success_or_attempt)
os.makedirs(dir, exist_ok=True)
for id in id_to_mat:
if "3agents" in id:
continue
a = id_to_mat[id]
size = a.shape[0]
plt.figure(figsize=(3, 3))
plt.matshow(a, cmap=wr)
ax = plt.gca()
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
plt.tick_params(
axis="x", # changes apply to the x-axis
which="both", # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # True,
labelbottom=False,
length=0,
)
ax.set_ylim(size + 0.1 - 0.5, -0.5 - 0.1)
plt.tick_params(axis="y", which="both", left=False, length=0) # left=True,
for i in range(size):
for j in range(size):
rect = patches.Rectangle(
(i - 0.5, j - 0.5),
1,
1,
linewidth=1,
edgecolor="lightgrey",
facecolor="none",
)
# Add the patch to the Axes
ax.add_patch(rect)
for i in range(size):
for j in range(size):
if (
size == 13
and (
(i < 4 and j < 4)
or (3 < i <= 7 and 3 < j <= 7)
or (i == j == 8)
or (8 < i and 8 < j)
)
or (
size == 14
and (
(i < 5 and j < 5)
or (4 < i <= 8 and 4 < j <= 8)
or (i == j == 9)
or (9 < i and 9 < j)
)
)
):
rect = patches.Rectangle(
(i - 0.5, j - 0.5),
1,
1,
linewidth=1,
edgecolor="black",
facecolor="none",
)
ax.add_patch(rect)
plt.savefig(os.path.join(dir, "{}.pdf".format(id)), bbox_inches="tight")
| cordial-sync-master | rl_multi_agent/analysis/visualize_furnmove_time_averaged_policies.py |
cordial-sync-master | rl_multi_agent/analysis/__init__.py |
|
"""
This script parses the saved data in `furnmove_evaluations__test/METHOD` to create
trajectory visualizations included in the supplementary of the paper and the videos included in the
qualitative result video. Particularly, the gridworld 2-agent experiments are analysed and the
trajectory summaries (4 top down views, evenly spread over the episode) and selected video
trajectories are saved in `ABS_PATH_TO_ANALYSIS_RESULTS_DIR/furnmove_traj_visualizations_gridworld/{METHOD}` directory.
Moreover, if the `METHOD` is communicative, an audio rendering of each agent's communication is also saved as a MIDI file.
Set the appropriate options/flags at the start of the script for the desired outputs. Guiding comments are included.
Run using command:
`python rl_multi_agent/analysis/visualize_gridbased_furnmove_trajectories.py`
See `visualize_visionbased_furnmove_trajectories.py` for an analogous vision-based script.
"""
import copy
import glob
import json
import os
import random
from typing import Tuple, Optional, List
import PIL
import colour as col
import imageio
import numpy as np
import torch
from PIL import Image
from midiutil import MIDIFile
import constants
from constants import ABS_PATH_TO_ANALYSIS_RESULTS_DIR, ABS_PATH_TO_DATA_DIR
from constants import ABS_PATH_TO_LOCAL_THOR_BUILD
from rl_ai2thor.ai2thor_environment import AI2ThorEnvironment
from rl_ai2thor.ai2thor_gridworld_environment import AI2ThorLiftedObjectGridEnvironment
from rl_multi_agent.analysis.visualize_visionbased_furnmove_trajectories import (
get_icon,
add_outline,
interleave,
add_progress_bar,
)
from utils.visualization_utils import save_frames_to_mp4
def add_actions_to_frame(
frame,
ba: Optional[int],
aa: Optional[int],
outline_colors: Optional[List[Optional[Tuple[int, int, int]]]] = None,
position_top=False,
):
import copy
frame = copy.deepcopy(frame)
h = frame.shape[0]
icon_size = h // 6
ba_icon, aa_icon = [
get_icon(i, size=icon_size) if i is not None else None for i in [ba, aa]
]
if outline_colors is not None:
for icon, color in zip([ba_icon, aa_icon], outline_colors):
if icon is not None and color is not None:
add_outline(icon, color=color)
margin = icon_size // 8
row_slice = (
slice(-(margin + icon_size), -margin)
if not position_top
else slice(margin, margin + icon_size)
)
if ba_icon is not None:
frame[row_slice, margin : (margin + icon_size)] = ba_icon
if aa_icon is not None:
frame[row_slice, -(margin + icon_size) : (-margin)] = aa_icon
return frame
if __name__ == "__main__":
# Set some options for the desired output:
# ---------
# Note that a snapshot/summary of all trajectories will be saved by this script.
# You could decide to save the video or not
SAVE_VIDEO = True
# Which episode do you wish to save the video for.
# Scheme: FloorPlan{SCENE_NUMBER}_physics_{TEST_EPISODE}, where SCENE_NUMBER goes from 226 to 230
# and TEST_EPISODE goes from 0 to 199.
video_save_path_ids = {
# "FloorPlan229_physics__0",
"FloorPlan229_physics__15",
# "FloorPlan229_physics__193",
# "FloorPlan230_physics__47",
# "FloorPlan230_physics__70",
# "FloorPlan230_physics__72",
}
# To add a progress bar to indicate how much of the episode has been completed
ADD_PROGRESS_BAR = True
# FPS for the video, faster/slower viewing.
FPS = 5
# Method to test
METHOD = "grid_mixture_cl_rot"
# ---------
# Additional path variables are set
load_from_dir = os.path.join(
ABS_PATH_TO_DATA_DIR, "furnmove_evaluations__test", METHOD
)
top_save_dir = os.path.join(
ABS_PATH_TO_ANALYSIS_RESULTS_DIR,
"furnmove_traj_visualizations_gridworld",
METHOD,
)
os.makedirs(top_save_dir, exist_ok=True)
env = AI2ThorLiftedObjectGridEnvironment(
local_thor_build=ABS_PATH_TO_LOCAL_THOR_BUILD,
num_agents=2,
lifted_object_height=1.3,
object_type="Television",
max_dist_to_lifted_object=0.76,
min_steps_between_agents=2,
remove_unconnected_positions=True,
)
env.start("FloorPlan1_physics")
paths = sorted(glob.glob(os.path.join(load_from_dir, "*.json")))
if not torch.cuda.is_available():
random.shuffle(paths)
for path in paths:
path_id = str(os.path.basename(path).replace(".json", ""))
if SAVE_VIDEO and path_id not in video_save_path_ids:
continue
with open(path, "r") as f:
traj_info = json.load(f)
step_results = traj_info["step_results"]
last_sr = copy.deepcopy(step_results[-1])
step_results.append(last_sr)
for i in range(len(last_sr)):
last_sr[i]["before_location"] = last_sr[i]["after_location"]
last_sr[i]["object_location"]["Television|1"]["before_location"] = last_sr[
i
]["object_location"]["Television|1"]["after_location"]
episode_init_data = traj_info["episode_init_data"]
tv_loc_start = episode_init_data["lifted_object_location"]
tv_loc_end = step_results[-1][0]["object_location"]["Television|1"][
"after_location"
]
if (not SAVE_VIDEO) and AI2ThorEnvironment.position_dist(
tv_loc_start, tv_loc_end, use_l1=True
) < 3.0:
print("Skipping {}".format(path_id))
continue
print("Visualizing {}".format(path_id))
env.reset(path_id.split("__")[0], move_mag=0.25)
for agent_id, loc in enumerate(episode_init_data["agent_locations"]):
env.step(
{
"action": "TeleportFull",
"agentId": agent_id,
"x": loc["x"],
"z": loc["z"],
"rotation": {"y": loc["rotation"]},
"allowAgentIntersection": True,
"makeReachable": True,
}
)
assert env.last_event.metadata["lastActionSuccess"]
env.step(
{
"action": "CreateLiftedFurnitureAtLocation",
"objectType": "Television",
"x": tv_loc_start["x"],
"z": tv_loc_start["z"],
"rotation": {"y": tv_loc_start["rotation"]},
"agentId": 0,
"forceAction": True,
}
)
assert env.last_event.metadata["lastActionSuccess"]
tv_id = "Television|1"
to_object_loc = episode_init_data["to_object_location"]
env.step(
{
"action": "CreateAndPlaceObjectOnFloorAtLocation",
"objectType": "Dresser",
"object_mask": env.controller.parse_template_to_mask(
constants.DRESSER_SILHOUETTE_STRING
),
**to_object_loc,
"rotation": {"y": to_object_loc["rotation"]},
"agentId": 0,
"forceAction": True,
}
)
assert env.last_event.metadata["lastActionSuccess"]
dresser_id = "Dresser|2"
video_inds = list(range(len(step_results)))
png_inds = list(
set(int(x) for x in np.linspace(0, len(step_results) - 1, num=4).round())
)
png_inds.sort()
save_path_png = os.path.join(
top_save_dir,
"{}__{}.png".format(
os.path.basename(path).replace(".json", ""),
"_".join([str(ind) for ind in png_inds]),
),
)
save_path_mp4 = os.path.join(
top_save_dir,
"{}__video.mp4".format(os.path.basename(path).replace(".json", "")),
)
if os.path.exists(save_path_png) and (
            os.path.exists(save_path_mp4) or not SAVE_VIDEO
):
print("{} already exists, skipping...".format(save_path_png))
continue
map_frames = []
a0_frames = []
a1_frames = []
colors_list = [
list(col.Color("red").range_to(col.Color("#ffc8c8"), len(step_results))),
list(col.Color("green").range_to(col.Color("#c8ffc8"), len(step_results))),
list(col.Color("blue").range_to(col.Color("#c8c8ff"), len(step_results))),
]
for ind in png_inds if not SAVE_VIDEO else video_inds:
if SAVE_VIDEO and ind % 50 == 0:
print("Processed {} steps".format(ind))
sr0, sr1 = step_results[ind]
loc0 = sr0["before_location"]
loc1 = sr1["before_location"]
tv_loc = sr0["object_location"][tv_id]["before_location"]
traj0 = [x["before_location"] for x, _ in step_results[: (ind + 1)]]
traj1 = [y["before_location"] for _, y in step_results[: (ind + 1)]]
tv_traj = [
x["object_location"][tv_id]["before_location"]
for x, _ in step_results[: (ind + 1)]
]
if ind == len(step_results) - 1:
traj0.append(loc0)
traj1.append(loc1)
tv_traj.append(tv_loc)
for agent_id, loc in enumerate([loc0, loc1]):
env.step(
{
"action": "TeleportFull",
"agentId": agent_id,
"x": loc["x"],
"z": loc["z"],
"rotation": {"y": loc["rotation"]},
"allowAgentIntersection": True,
"makeReachable": True,
"forceAction": True,
}
)
assert env.last_event.metadata[
"lastActionSuccess"
], env.last_event.metadata["errorMessage"]
env.step(
{
"action": "TeleportObject",
"objectId": tv_id,
**tv_loc,
"rotation": {"y": tv_loc["rotation"]},
"forceAction": True,
"agentId": 0,
}
)
assert env.last_event.metadata["lastActionSuccess"]
map_frames.append(env.controller.viz_world(20, False, 0, True))
if SAVE_VIDEO:
def to_outline_color(success):
if success is None:
return None
elif success:
return (0, 255, 0)
else:
return (255, 0, 0)
action_success0 = [None, None]
action_success1 = [None, None]
# ba{i} aa{i} are before and after actions of agent i
if ind == 0:
ba0, ba1 = None, None
aa0, aa1 = sr0["action"], sr1["action"]
action_success0[1] = sr0["action_success"]
action_success1[1] = sr1["action_success"]
elif ind == len(video_inds):
oldsr0, oldsr1 = step_results[ind - 1]
ba0, ba1 = oldsr0["action"], step_results["action"]
aa0, aa1 = None, None
action_success0[0] = sr0["action_success"]
action_success1[0] = sr1["action_success"]
else:
oldsr0, oldsr1 = step_results[ind - 1]
ba0, ba1 = oldsr0["action"], oldsr1["action"]
aa0, aa1 = sr0["action"], sr1["action"]
action_success0[0] = oldsr0["action_success"]
action_success1[0] = oldsr1["action_success"]
action_success0[1] = sr0["action_success"]
action_success1[1] = sr1["action_success"]
ego_frames = env.controller.viz_ego_agent_views(40, array_only=True)
ego_frames = [
np.pad(
ef,
((4, 4), (4, 4), (0, 0)),
mode="constant",
constant_values=200,
)
for ef in ego_frames
]
a0_frames.append(
add_actions_to_frame(
ego_frames[0],
ba=ba0,
aa=None, # aa0,
outline_colors=list(map(to_outline_color, action_success0)),
position_top=True,
)
)
a1_frames.append(
add_actions_to_frame(
ego_frames[1],
ba=ba1,
aa=None, # aa1,
outline_colors=list(map(to_outline_color, action_success1)),
position_top=True,
)
)
map_height, map_width, _ = map_frames[0].shape
fp_frame_height = a0_frames[0].shape[0] if len(a0_frames) != 0 else map_height
spacer = np.full(
(fp_frame_height, fp_frame_height // 20, 3), fill_value=255, dtype=np.uint8
)
if SAVE_VIDEO and not os.path.exists(save_path_mp4):
new_map_width = round(fp_frame_height * map_width / map_height)
f = lambda x: np.array(
Image.fromarray(x.astype("uint8"), "RGB").resize(
(new_map_width, fp_frame_height), resample=PIL.Image.LANCZOS
)
)
joined_frames = []
for ind, (map_frame, a0_frame, a1_frame) in enumerate(
zip(map_frames, a0_frames, a1_frames)
):
joined_frames.append(
np.concatenate(
interleave(
[a0_frame, a1_frame, f(map_frame)],
# [f(a0_frame), f(a1_frame), map_frame],
[spacer] * len(map_frames),
)[:-1],
axis=1,
)
)
if ADD_PROGRESS_BAR:
color = (100, 100, 255)
if ind == len(map_frames) - 1:
color = (0, 255, 0) if len(step_results) < 250 else (255, 0, 0)
joined_frames[-1] = add_progress_bar(
joined_frames[-1], ind / (len(map_frames) - 1), color=color
)
final_frame = copy.deepcopy(joined_frames[-1])
save_frames_to_mp4(
joined_frames + [final_frame] * FPS, save_path_mp4, fps=FPS,
)
for agent_id in range(2):
MyMIDI = MIDIFile(1)
MyMIDI.addTempo(0, 0, FPS * 60)
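                # Sonify each agent's communication: a tempo of FPS * 60 beats per
                # minute makes one beat last 1 / FPS seconds (one video frame), and each
                # reply probability becomes a one-beat note with MIDI pitch
                # round(127 * prob). The trailing silent notes pad the track to match the
                # repeated final video frame.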
current_time = 0
for probs in traj_info["a{}_reply_probs".format(agent_id)]:
MyMIDI.addNote(
0, 0, round(127 * probs[0]), current_time, 1, volume=100
)
current_time += 1
for i in range(FPS):
MyMIDI.addNote(0, 0, 0, current_time, 1, volume=0)
current_time += 1
midi_save_path = os.path.join(
top_save_dir,
"{}_{}.mid".format(
os.path.basename(path).replace(".json", ""), agent_id
),
)
with open(midi_save_path, "wb") as output_file:
MyMIDI.writeFile(output_file)
if not os.path.exists(save_path_png):
if SAVE_VIDEO:
# Select the frames from the video. No selection is needed if `SAVE_VIDEO` is `False`.
map_frames = [map_frames[ind] for ind in png_inds]
spacer = np.full(
(map_height, fp_frame_height // 20, 3), fill_value=255, dtype=np.uint8
)
big_frame = np.concatenate(
interleave(map_frames, [spacer] * len(map_frames))[:-1], axis=1
)
imageio.imwrite(save_path_png, big_frame)
| cordial-sync-master | rl_multi_agent/analysis/visualize_gridbased_furnmove_trajectories.py |
"""
A script to summarize data saved in the `data/furnlift_evaluations__test/` directory,
as a result of downloading data or running evaluation using the script:
`rl_multi_agent/scripts/run_furnmove_or_furnlift_evaluations.py`
Set the metrics, methods, dataset split and generate a csv-styled table of metrics and confidence
intervals.
Run using command:
`python rl_multi_agent/analysis/summarize_furnlift_eval_results.py`
Also, see `rl_multi_agent/analysis/summarize_furnmove_eval_results.py` for a similar script for
FurnMove task.
"""
import glob
import json
import math
import os
from collections import defaultdict
import numpy as np
import pandas as pd
from constants import ABS_PATH_TO_DATA_DIR, EASY_MAX, MED_MAX
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
pd.set_option("display.width", 1000)
def create_table(id_to_df, metrics_to_record, split: str = "all"):
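    # Episodes are bucketed by their initial Manhattan step estimate into easy/medium/
    # hard splits (boundaries EASY_MAX and MED_MAX) or kept whole ("all"); for each
    # method the per-metric mean and a 95% confidence half-width (1.96 * std / sqrt(n))
    # are returned as two aligned DataFrames.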
if split == "easy":
min_dist = -1
max_dist = EASY_MAX
elif split == "medium":
min_dist = EASY_MAX + 1
max_dist = MED_MAX
elif split == "hard":
min_dist = MED_MAX + 1
max_dist = 1000
elif split == "all":
min_dist = -1
max_dist = 10000
else:
raise NotImplementedError()
sorted_ids = sorted(list(id_to_df.keys()))
result_means = defaultdict(lambda: [])
result_95_confs = defaultdict(lambda: [])
for id in sorted_ids:
df = id_to_df[id].query(
"initial_manhattan_steps <= {max_dist} and initial_manhattan_steps > {min_dist}".format(
max_dist=max_dist, min_dist=min_dist
)
)
for metric_name in metrics_to_record:
values = np.array(df[metric_name])
result_means[metric_name].append(np.mean(values))
result_95_confs[metric_name].append(
1.96 * np.std(values) / math.sqrt(len(values))
)
means_df = pd.DataFrame(dict(result_means), index=sorted_ids)
confs_df = pd.DataFrame(dict(result_95_confs), index=sorted_ids)
return means_df.round(6), confs_df.round(6)
if __name__ == "__main__":
# Set constants for summarizing results.
# Select metrics to summarize
metrics_to_record = [
"spl_manhattan",
"accuracy",
"ep_length",
"final_distance",
"invalid_prob_mass",
"tvd",
"picked_but_not_pickupable",
"pickupable_but_not_picked",
]
# Listing all methods
vision_multi_nav = [
"vision_noimplicit_central_cl",
"vision_noimplicit_marginal_nocl",
"vision_noimplicit_marginalnocomm_nocl",
"vision_noimplicit_mixture_cl",
]
# Split of the dataset. The relevant directory would be searched for saved evaluation information
split = "test"
# Which methods to summarize
subset_interested_in = vision_multi_nav
# Summarize absolute values, 95% confidence intervals, and show them with a (val \pm conf) schema
# The last is useful to input into a latex table.
display_values = True
display_95conf = True
display_combined = False
id_to_df = {}
for dir in glob.glob(
os.path.join(ABS_PATH_TO_DATA_DIR, "furnlift_evaluations__{}/*__*").format(
split
)
):
dir_name = os.path.basename(dir)
id = dir_name.split("__")[1]
if id not in subset_interested_in:
continue
print()
print("Processing method: {}".format(id))
recorded = defaultdict(lambda: [])
for i, p in enumerate(glob.glob(os.path.join(dir, "*.json"))):
if i % 100 == 0:
print("Completed {} episodes".format(i))
with open(p, "r") as f:
result_dict = json.load(f)
for k in metrics_to_record + ["initial_manhattan_steps"]:
if k == "tvd":
assert "dist_to_low_rank" in result_dict
recorded[k].append(result_dict["dist_to_low_rank"] / 2)
elif k != "action_success_percent":
recorded[k].append(result_dict[k])
id_to_df[id] = pd.DataFrame(dict(recorded))
if id_to_df[id].shape[0] == 0:
del id_to_df[id]
a, b = create_table(id_to_df, metrics_to_record)
if display_values:
# Metrics only
print(a.round(3).to_csv())
if display_95conf:
# Corresponding 95% confidence intervals
print(b.round(3).to_csv())
if display_combined:
# Combining the above
        a_str = a.round(3).to_numpy().astype("str").astype(object)
        b_str = b.round(3).to_numpy().astype("str").astype(object)
combined_str = a_str + " pm " + b_str
print(
pd.DataFrame(data=combined_str, index=a.index, columns=a.columns).to_csv()
)
| cordial-sync-master | rl_multi_agent/analysis/summarize_furnlift_eval_results.py |
"""
This script parses the saved data in `furnmove_evaluations__test/METHOD` to create
trajectory visualizations included in the supplementary of the paper and the videos included in the
qualitative result video. In particular, the marginal and SYNC 2-agent experiments are analysed:
trajectory summaries (4 top-down views, evenly spread over the episode) and selected video
trajectories are saved in the `ABS_PATH_TO_ANALYSIS_RESULTS_DIR/furnmove_traj_visualizations_vision/{METHOD}` directory.
Moreover, if `METHOD` is communicative, an audio rendering of the agents' communication is also saved as a MIDI file.
Set the appropriate options/flags at the start of the script for the desired outputs; guiding comments are included.
Run using command:
`python rl_multi_agent/analysis/visualize_visionbased_furnmove_trajectories.py`
See `visualize_gridbased_furnmove_trajectories.py` for an analogous grid-based script.
"""
import copy
import glob
import json
import math
import os
from functools import lru_cache
from typing import Tuple, Optional, List
import PIL
import colour as col
import imageio
import numpy as np
from PIL import Image
from midiutil import MIDIFile
from constants import (
ABS_PATH_TO_LOCAL_THOR_BUILD,
ABS_PATH_TO_ANALYSIS_RESULTS_DIR,
ABS_PATH_TO_DATA_DIR,
PROJECT_TOP_DIR,
)
from rl_ai2thor.ai2thor_environment import AI2ThorEnvironment
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricEpisode
from utils.visualization_utils import (
visualize_agent_path,
save_frames_to_mp4,
get_agent_map_data,
)
def interleave(l0, l1):
return [v for p in zip(l0, l1) for v in p]
IND_TO_ACTION_STR = FurnMoveEgocentricEpisode.class_available_actions(
include_move_obj_actions=True
)
@lru_cache(128)
def get_icon(ind, size):
im = Image.open(
os.path.join(
PROJECT_TOP_DIR,
"images/action_icons/furnmove/{}.png".format(IND_TO_ACTION_STR[int(ind)]),
),
"r",
).convert("RGBA")
white_frame = Image.new("RGBA", im.size, color=(255, 255, 255, 255))
return np.array(
Image.alpha_composite(white_frame, im)
.convert("RGB")
.resize((size, size), resample=PIL.Image.LANCZOS)
)
def add_outline(frame, color, size=2):
color = np.array(color).reshape((1, 1, 3))
frame[:, :size, :] = color
frame[:, -size:, :] = color
frame[:size, :, :] = color
frame[-size:, :, :] = color
def add_progress_bar(frame, p, color=(100, 100, 255)):
height, width, _ = frame.shape
bar = np.full((max(20, height // 30), width, 3), fill_value=255, dtype=np.uint8)
spacer = np.copy(bar[: bar.shape[0] // 3, :, :])
bar[:, : round(width * p), :] = np.array(color).reshape((1, 1, 3))
add_outline(bar, (200, 200, 200), size=max(1, height // 200))
return np.concatenate((frame, spacer, bar), axis=0)
def add_actions_to_frame(
frame,
ba: Optional[int],
aa: Optional[int],
outline_colors: Optional[List[Optional[Tuple[int, int, int]]]] = None,
):
import copy
frame = copy.deepcopy(frame)
h = frame.shape[0]
icon_size = h // 6
ba_icon, aa_icon = [
get_icon(i, size=icon_size) if i is not None else None for i in [ba, aa]
]
if outline_colors is not None:
for icon, color in zip([ba_icon, aa_icon], outline_colors):
if icon is not None and color is not None:
add_outline(icon, color=color)
margin = icon_size // 8
if ba_icon is not None:
frame[
-(margin + icon_size) : (-margin), margin : (margin + icon_size)
] = ba_icon
if aa_icon is not None:
frame[
-(margin + icon_size) : (-margin), -(margin + icon_size) : (-margin)
] = aa_icon
return frame
def unit_vector(vector):
""" Returns the unit vector of the vector.
Taken from https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249.
"""
return vector / np.linalg.norm(vector)
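# linear_increase_decrease maps p in [0, 1] to [0, 1] along a piecewise-quadratic
# ease-in/ease-out curve: it passes through (0, 0), (0.5, 0.5), and (1, 1) with zero
# slope at both endpoints, so interpolated motion starts and stops smoothly.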
def linear_increase_decrease(p):
if p <= 1 / 2:
return 2 * (p ** 2)
else:
return -1 + 4 * p - 2 * (p ** 2)
def signed_angle_between(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'::
Taken from https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249.
"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
minor = np.linalg.det(np.stack((v1_u[-2:], v2_u[-2:])))
angle = np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
if round(angle, 8) == 0:
return angle
if minor == 0:
raise NotImplementedError("Too odd vectors =(")
return np.sign(minor) * angle
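# Worked example: signed_angle_between((1, 0), (0, 1)) is +pi/2 and
# signed_angle_between((0, 1), (1, 0)) is -pi/2; the magnitude comes from the arccos of the
# dot product and the sign from the 2x2 determinant ("minor") of the stacked unit vectors.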
def interpolate_step_results(step_results, multiplier):
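    # Inserts `multiplier - 1` interpolated step results between each pair of consecutive
    # step results so rendered videos play back smoothly: x/y/z/horizon values are linearly
    # interpolated, while rotations are snapped to multiples of 90 degrees and interpolated
    # along the shortest signed angle between them.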
if multiplier == 1:
return step_results
@lru_cache(100)
def signed_angle(rota, rotb):
avec = np.array(
[math.cos(-2 * math.pi * rota / 360), math.sin(-2 * math.pi * rota / 360)]
)
bvec = np.array(
[math.cos(-2 * math.pi * rotb / 360), math.sin(-2 * math.pi * rotb / 360)]
)
return 360 * (-signed_angle_between(avec, bvec)) / (2 * math.pi)
def recursive_dict_interpolator(a, b, p, is_angle=False):
if is_angle:
is_angle_dict = type(a) == dict
if is_angle_dict:
rota = a["y"]
rotb = b["y"]
else:
rota = a
rotb = b
rota = 90 * round(rota / 90) % 360
rotb = 90 * round(rotb / 90) % 360
angle_from_a_to_b = signed_angle(rota, rotb)
if is_angle_dict:
return {
"x": a["x"],
"y": (a["y"] + p * angle_from_a_to_b) % 360,
"z": a["z"],
}
else:
return (a + p * angle_from_a_to_b) % 360
t = type(a)
if t == dict:
return {
k: (
(1 - p) * a[k] + p * b[k]
if k in ["x", "y", "z", "horizon"]
else (recursive_dict_interpolator(a[k], b[k], p, "rotation" in k))
)
for k in a
if k in b
}
else:
if p == 1:
return b
return a
new_step_results = [step_results[0]]
for next_sr in step_results[1:]:
last_sr = new_step_results[-1]
new_step_results.extend(
[
recursive_dict_interpolator(
last_sr[j], next_sr[j], (i + 1) / multiplier, False,
)
for j in range(2)
]
for i in range(multiplier)
)
return new_step_results
if __name__ == "__main__":
# See `visualize_gridbased_furnmove_trajectories.py` for a guide on how to set the options for
# desired visualizations.
# The additional flag of `MULTIPLIER` helps generate a smoother video, by dividing one step into `MULTIPLIER` steps.
SAVE_VIDEO = False
ADD_PROGRESS_BAR = True
FPS = 5
MULTIPLIER = 5
mode = "mixture"
# mode = "marginal"
if mode == "mixture":
METHOD = "vision_mixture_cl_rot"
elif mode == "marginal":
METHOD = "vision_marginal_nocl_rot"
else:
raise NotImplementedError
load_from_dir = os.path.join(
ABS_PATH_TO_DATA_DIR, "furnmove_evaluations__test", METHOD
)
top_save_dir = os.path.join(
ABS_PATH_TO_ANALYSIS_RESULTS_DIR, "furnmove_traj_visualizations_vision", METHOD
)
os.makedirs(top_save_dir, exist_ok=True)
video_save_path_ids = {
# "FloorPlan229_physics__0",
"FloorPlan229_physics__15",
# "FloorPlan229_physics__193",
# "FloorPlan230_physics__47",
# "FloorPlan230_physics__70",
# "FloorPlan230_physics__72",
}
screen_size = 500
env = AI2ThorEnvironment(
local_thor_build=ABS_PATH_TO_LOCAL_THOR_BUILD, num_agents=2
)
env.start(
"FloorPlan1_physics",
player_screen_height=screen_size,
player_screen_width=screen_size,
quality="Ultra",
move_mag=0.25 / MULTIPLIER,
)
paths = sorted(glob.glob(os.path.join(load_from_dir, "*.json")))
for path in paths:
path_id = str(os.path.basename(path).replace(".json", ""))
if path_id not in video_save_path_ids:
continue
with open(path, "r") as f:
traj_info = json.load(f)
map_row_slice = None
map_col_slice = None
step_results = traj_info["step_results"]
last_sr = copy.deepcopy(step_results[-1])
step_results.append(last_sr)
for i in range(len(last_sr)):
last_sr[i]["before_location"] = last_sr[i]["after_location"]
last_sr[i]["object_location"]["Television|1"]["before_location"] = last_sr[
i
]["object_location"]["Television|1"]["after_location"]
episode_init_data = traj_info["episode_init_data"]
tv_loc_start = episode_init_data["lifted_object_location"]
tv_loc_end = step_results[-1][0]["object_location"]["Television|1"][
"after_location"
]
if (not SAVE_VIDEO) and AI2ThorEnvironment.position_dist(
tv_loc_start, tv_loc_end, use_l1=True
) < 3.0:
print("Skipping {}".format(path_id))
continue
if SAVE_VIDEO:
step_results = interpolate_step_results(step_results, multiplier=MULTIPLIER)
print("Visualizing {}".format(path_id))
env.reset(path_id.split("__")[0], move_mag=0.25)
init_reachable = env.initially_reachable_points_set
env.reset(path_id.split("__")[0], move_mag=0.25 / MULTIPLIER)
env.step(
{
"action": "CreateObjectAtLocation",
"objectType": "Television",
"forceAction": True,
"agentId": 0,
"renderImage": False,
"position": {
k: v for k, v in tv_loc_start.items() if k in ["x", "y", "z"]
},
"rotation": {"x": 0, "y": tv_loc_start["rotation"], "z": 0},
}
)
assert env.last_event.metadata["lastActionSuccess"]
tv_id = "Television|1"
to_object_loc = episode_init_data["to_object_location"]
env.step(
{
"action": "CreateObjectAtLocation",
"objectType": "Dresser",
"forceAction": True,
"agentId": 0,
"renderImage": False,
"position": {
k: v for k, v in to_object_loc.items() if k in ["x", "y", "z"]
},
"rotation": {"x": 0, "y": to_object_loc["rotation"], "z": 0},
}
)
assert env.last_event.metadata["lastActionSuccess"]
dresser_id = "Dresser|2"
video_inds = list(range(len(step_results)))
png_inds = list(
set(int(x) for x in np.linspace(0, len(step_results) - 1, num=4).round())
)
png_inds.sort()
save_path_png = os.path.join(
top_save_dir,
"{}__{}.png".format(
os.path.basename(path).replace(".json", ""),
"_".join(
[str(ind // MULTIPLIER if SAVE_VIDEO else ind) for ind in png_inds]
),
),
)
save_path_mp4 = os.path.join(
top_save_dir,
"{}__video.mp4".format(os.path.basename(path).replace(".json", "")),
)
        if os.path.exists(save_path_png) and (
            os.path.exists(save_path_mp4) or not SAVE_VIDEO
        ):
print("{} already exists, skipping...".format(save_path_png))
continue
map_frames = []
a0_frames = []
a1_frames = []
colors_list = [
list(col.Color("red").range_to(col.Color("#ffc8c8"), len(step_results))),
list(col.Color("green").range_to(col.Color("#c8ffc8"), len(step_results))),
list(col.Color("blue").range_to(col.Color("#c8c8ff"), len(step_results))),
]
agent_0_full_traj = [x["before_location"] for x, _ in step_results]
agent_1_full_traj = [y["before_location"] for _, y in step_results]
full_tv_traj = [
x["object_location"][tv_id]["before_location"] for x, _ in step_results
]
for ind in png_inds if not SAVE_VIDEO else video_inds:
if SAVE_VIDEO and ind % (50 * MULTIPLIER) == 0:
print("Processed {} steps".format(ind // MULTIPLIER))
sr0, sr1 = step_results[ind]
loc0 = sr0["before_location"]
loc1 = sr1["before_location"]
tv_loc = sr0["object_location"][tv_id]["before_location"]
traj0 = agent_0_full_traj[: (ind + 1)]
traj1 = agent_1_full_traj[: (ind + 1)]
tv_traj = full_tv_traj[: (ind + 1)]
if ind == len(step_results) - 1:
traj0.append(loc0)
traj1.append(loc1)
tv_traj.append(tv_loc)
env.teleport_agent_to(
**loc0, force_action=True, agent_id=0, render_image=False
)
assert env.last_event.metadata["lastActionSuccess"]
env.teleport_agent_to(
**loc1, force_action=True, agent_id=1, render_image=False
)
assert env.last_event.metadata["lastActionSuccess"]
env.step(
{
"action": "TeleportObject",
"objectId": tv_id,
**tv_loc,
"rotation": tv_loc["rotation"],
"forceAction": True,
"agentId": 0,
"renderImage": False,
}
)
assert env.last_event.metadata["lastActionSuccess"]
map_data = get_agent_map_data(env)
frame = map_data["frame"]
for i, (traj, show_vis_cone, opacity) in enumerate(
zip([traj0, traj1, tv_traj], [True, True, False], [0.4, 0.4, 0.8])
):
frame = visualize_agent_path(
traj,
frame,
map_data["pos_translator"],
color_pair_ind=i,
show_vis_cone=show_vis_cone,
only_show_last_visibility_cone=True,
opacity=opacity,
colors=colors_list[i],
)
if map_row_slice is None:
xs = [p[0] for p in init_reachable]
zs = [p[1] for p in init_reachable]
minx, maxx = min(xs) - 0.5, max(xs) + 0.5
minz, maxz = min(zs) - 0.5, max(zs) + 0.5
minrow, mincol = map_data["pos_translator"]((minx, maxz))
maxrow, maxcol = map_data["pos_translator"]((maxx, minz))
map_row_slice = slice(max(minrow, 0), min(maxrow, screen_size))
map_col_slice = slice(max(mincol, 0), min(maxcol, screen_size))
frame = frame[
map_row_slice, map_col_slice,
]
map_frames.append(frame)
if SAVE_VIDEO:
def to_outline_color(success):
if success is None:
return None
elif success:
return (0, 255, 0)
else:
return (255, 0, 0)
action_success0 = [None, None]
action_success1 = [None, None]
if ind == 0:
ba0, ba1 = None, None
aa0, aa1 = sr0["action"], sr1["action"]
action_success0[1] = sr0["action_success"]
action_success1[1] = sr1["action_success"]
                elif ind == len(video_inds) - 1:
                    oldsr0, oldsr1 = step_results[ind - 1]
                    ba0, ba1 = oldsr0["action"], oldsr1["action"]
aa0, aa1 = None, None
action_success0[0] = sr0["action_success"]
action_success1[0] = sr1["action_success"]
else:
oldsr0, oldsr1 = step_results[ind - 1]
ba0, ba1 = oldsr0["action"], oldsr1["action"]
aa0, aa1 = sr0["action"], sr1["action"]
action_success0[0] = oldsr0["action_success"]
action_success1[0] = oldsr1["action_success"]
action_success0[1] = sr0["action_success"]
action_success1[1] = sr1["action_success"]
a0_frames.append(
add_actions_to_frame(
env.current_frames[0],
ba=ba0,
aa=None,
outline_colors=list(map(to_outline_color, action_success0)),
)
)
a1_frames.append(
add_actions_to_frame(
env.current_frames[1],
ba=ba1,
aa=None,
outline_colors=list(map(to_outline_color, action_success1)),
)
)
map_height, map_width, _ = map_frames[0].shape
fp_frame_height = a0_frames[0].shape[0] if len(a0_frames) != 0 else map_height
spacer = np.full(
(fp_frame_height, screen_size // 20, 3), fill_value=255, dtype=np.uint8
)
if SAVE_VIDEO and not os.path.exists(save_path_mp4):
new_map_width = round(fp_frame_height * map_width / map_height)
f = lambda x: np.array(
Image.fromarray(x.astype("uint8"), "RGB").resize(
(new_map_width, fp_frame_height), resample=PIL.Image.LANCZOS
)
)
joined_frames = []
for ind, (map_frame, a0_frame, a1_frame) in enumerate(
zip(map_frames, a0_frames, a1_frames)
):
joined_frames.append(
np.concatenate(
interleave(
[a0_frame, a1_frame, f(map_frame)],
[spacer] * len(map_frames),
)[:-1],
axis=1,
)
)
if ADD_PROGRESS_BAR:
color = (100, 100, 255)
if ind == len(map_frames) - 1:
color = (
(0, 255, 0)
if len(step_results) // MULTIPLIER < 250
else (255, 0, 0)
)
joined_frames[-1] = add_progress_bar(
joined_frames[-1], ind / (len(map_frames) - 1), color=color
)
final_frame = copy.deepcopy(joined_frames[-1])
if MULTIPLIER == 1:
save_frames_to_mp4(
joined_frames + [final_frame] * FPS, save_path_mp4, fps=FPS,
)
else:
save_frames_to_mp4(
joined_frames + [final_frame] * FPS * MULTIPLIER,
save_path_mp4,
fps=FPS * MULTIPLIER,
)
for agent_id in range(2):
MyMIDI = MIDIFile(1)
MyMIDI.addTempo(0, 0, FPS * 60)
current_time = 0
for probs in traj_info["a{}_reply_probs".format(agent_id)]:
MyMIDI.addNote(0, 0, round(127 * probs[0]), current_time, 1, volume=100)
current_time += 1
for i in range(FPS):
MyMIDI.addNote(0, 0, 0, current_time, 1, volume=0)
current_time += 1
midi_save_path = os.path.join(
top_save_dir,
"{}_{}.mid".format(
os.path.basename(path).replace(".json", ""), agent_id
),
)
with open(midi_save_path, "wb") as output_file:
MyMIDI.writeFile(output_file)
if not os.path.exists(save_path_png):
map_frames = [
map_frames[ind]
for ind in (png_inds if SAVE_VIDEO else range(len(map_frames)))
]
spacer = np.full(
(map_height, screen_size // 20, 3), fill_value=255, dtype=np.uint8
)
big_frame = np.concatenate(
interleave(map_frames, [spacer] * len(map_frames))[:-1], axis=1
)
imageio.imwrite(save_path_png, big_frame)
| cordial-sync-master | rl_multi_agent/analysis/visualize_visionbased_furnmove_trajectories.py |
"""
A script to summarize data saved in the `data/furnmove_evaluations__test/` directory,
as a result of downloading data or running evaluation using the script:
`rl_multi_agent/scripts/run_furnmove_or_furnlift_evaluations.py`
Set the metrics, methods, and dataset split to generate a CSV-styled table of metrics and confidence
intervals. In particular, choose among the following groups of methods:
* grid_vision_furnmove: Four prominent methods (no comm, marginal, SYNC, central) for gridworld and
visual environments.
* grid_3agents: Marginal, SYNC, and central methods for three agent gridworld-FurnMove setting.
* vision_mixtures: Effect of number of mixture components m on SYNC’s performance (in FurnMove)
* vision_cl_ablation: Effect of CORDIAL (`cl`) on marginal, SYNC, and central methods.
* vision_3agents: Marginal, SYNC, and central methods for three agent FurnMove setting.
Run using command:
`python rl_multi_agent/analysis/summarize_furnmove_eval_results.py`
Also, see `rl_multi_agent/analysis/summarize_furnlift_eval_results.py` for the analogous script for
the FurnLift task.
"""
import glob
import json
import os
from collections import defaultdict
import numpy as np
import pandas as pd
from constants import ABS_PATH_TO_DATA_DIR
from rl_multi_agent.analysis.summarize_furnlift_eval_results import create_table
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
pd.set_option("display.width", 1000)
if __name__ == "__main__":
# Set constants for summarizing results.
# Select metrics to summarize
metrics_to_record = [
"spl_manhattan",
"reached_target",
"ep_length",
"final_distance",
"invalid_prob_mass",
"tvd",
# "reward",
]
    # Split of the dataset. The corresponding directory will be searched for saved evaluation information.
split = "test"
id_to_df = {}
# Listing all methods
grid_vision_furnmove = [
"grid_bigcentral_cl_rot",
"grid_marginal_nocl_rot",
"grid_marginalnocomm_nocl_rot",
"grid_mixture_cl_rot",
"vision_bigcentral_cl_rot",
"vision_marginal_nocl_rot",
"vision_marginalnocomm_nocl_rot",
"vision_mixture_cl_rot",
]
grid_3agents = [
"grid_central_3agents",
"grid_marginal_3agents",
"grid_mixture_3agents",
]
vision_mixtures = [
"vision_marginal_cl_rot",
"vision_mixture2mix_cl_rot",
"vision_mixture4mix_cl_rot",
"vision_mixture_cl_rot",
]
vision_cl_ablation = [
"vision_marginal_nocl_rot",
"vision_marginal_cl_rot",
"vision_bigcentral_nocl_rot",
"vision_bigcentral_cl_rot",
"vision_mixture_nocl_rot",
"vision_mixture_cl_rot",
]
vision_3agents = [
"vision_central_3agents",
"vision_marginal_3agents",
"vision_mixture_3agents",
]
# Which methods to summarize
subset_interested_in = grid_vision_furnmove
    # Summarize absolute values, 95% confidence intervals, and/or show them combined in a
    # (val \pm conf) format. The last option is useful for pasting into a LaTeX table.
display_values = True
display_95conf = True
display_combined = False
for dir in glob.glob(
os.path.join(ABS_PATH_TO_DATA_DIR, "furnmove_evaluations__{}/*").format(split)
):
id = os.path.basename(dir)
if id not in subset_interested_in:
continue
print()
print("Processing method: {}".format(id))
recorded = defaultdict(lambda: [])
for i, p in enumerate(glob.glob(os.path.join(dir, "*.json"))):
if i % 100 == 0:
print("Completed {} episodes".format(i))
with open(p, "r") as f:
result_dict = json.load(f)
for k in metrics_to_record + ["initial_manhattan_steps"]:
if k == "tvd":
assert "dist_to_low_rank" in result_dict
recorded[k].append(result_dict["dist_to_low_rank"] / 2)
elif k != "action_success_percent":
recorded[k].append(result_dict[k])
recorded["action_success_percent"].append(
np.array(result_dict["action_taken_success_matrix"]).sum()
/ (result_dict["ep_length"] // 2)
)
id_to_df[id] = pd.DataFrame(dict(recorded))
if id_to_df[id].shape[0] == 0:
del id_to_df[id]
a, b = create_table(id_to_df, metrics_to_record)
if display_values:
# Metrics only
print(a.round(3).to_csv())
if display_95conf:
# Corresponding 95% confidence intervals
print(b.round(3).to_csv())
if display_combined:
# Combining the above
        a_str = a.round(3).to_numpy().astype("str").astype(object)
        b_str = b.round(3).to_numpy().astype("str").astype(object)
combined_str = a_str + " pm " + b_str
print(
pd.DataFrame(data=combined_str, index=a.index, columns=a.columns).to_csv()
)
| cordial-sync-master | rl_multi_agent/analysis/summarize_furnmove_eval_results.py |
from rl_multi_agent.experiments.furnlift_vision_marginalnocomm_nocl_config import (
FurnLiftMinDistUncoordinatedNocommExperimentConfig,
)
class FurnLiftMinDistUncoordinatedNocommNoimplicitExperimentConfig(
FurnLiftMinDistUncoordinatedNocommExperimentConfig
):
# Env/episode config
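    # Setting visible_agents to False removes the other agent from each agent's observations,
    # i.e. the "no implicit communication" ablation referenced by this config's file name.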
visible_agents = False
def get_experiment():
return FurnLiftMinDistUncoordinatedNocommNoimplicitExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnlift_vision_noimplicit_marginalnocomm_nocl_config.py |
from typing import Optional
from rl_multi_agent.experiments.furnmove_grid_marginal_cl_base_config import (
FurnMoveExperimentConfig,
)
class FurnMoveGridExperimentConfig(FurnMoveExperimentConfig):
@classmethod
def get_init_train_params(cls):
init_train_params = FurnMoveExperimentConfig.get_init_train_params()
init_train_params["environment_args"] = {"min_steps_between_agents": 2}
return init_train_params
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveGridExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_marginal_cl_rot_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricEpisode
from rl_multi_agent.models import A3CLSTMBigCentralEgoVision
class FurnMoveCentralVision3AgentsExperimentConfig(ExperimentConfig):
# Env/episode config
num_agents = 3
screen_size = 84
episode_class = FurnMoveEgocentricEpisode
frame_type = "image"
episode_sampler_class = FurnMoveEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = False
# Model config
state_repr_length = 512
# Agent config
agent_class = MultiAgent
discourage_failed_coordination = 5000
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 250 * cls.num_agents,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 2.0,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 2.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMBigCentralEgoVision(
num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
discourage_failed_coordination=cls.discourage_failed_coordination,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
    def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveCentralVision3AgentsExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_central_3agents_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoVision
class FurnMoveExperimentConfig(ExperimentConfig):
"""
All scenes + 1.0 reward for getting closer to the target
    Discrete communication, without the failed-coordination (CORDIAL) loss
"""
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricEpisode
frame_type = "image"
episode_sampler_class = FurnMoveEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = False
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
num_talk_symbols = 2
num_reply_symbols = 2
coordinate_actions = True
# Agent config
agent_class = MultiAgent
turn_off_communication = False
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
# "increasing_rotate_penalty": True,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
# "moved_closer_reward": 0.50,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMNStepComCoordinatedActionsEgoVision(
num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=2 if cls.coordinate_actions else None,
separate_actor_weights=False,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
# discourage_failed_coordination=5000,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
# return "trained_models/furnmove_a3c_mixture_ego_vision_ego_actions_v3_490000_2019-10-28_16-43-13.dat"
return None
def get_experiment():
return FurnMoveExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_mixture2mix_nocl_rot_config.py |
from typing import Optional
from torch import nn
import constants
from rl_multi_agent.experiments.furnmove_grid_marginalnocomm_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.furnmove_episodes import (
FurnMoveEgocentricNoRotationsFastGridEpisode,
)
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
class FurnMoveNoCommNoRotationsExperimentConfig(FurnMoveExperimentConfig):
episode_class = FurnMoveEgocentricNoRotationsFastGridEpisode
# Increasing the params of marginal to match mixture
final_cnn_channels = 288
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"environment_args": {"min_steps_between_agents": 2},
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
"min_dist_to_to_object": 0.26,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"frame_type": "fast-egocentric-relative-tensor",
"pass_conditioned_coordination": True,
},
}
return init_train_params
@property
def saved_model_path(self) -> Optional[str]:
return None
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
def _create_model(**kwargs):
return A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(
**{
**dict(
num_inputs=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
final_cnn_channels=cls.final_cnn_channels,
),
**kwargs,
}
)
return _create_model(**kwargs)
def get_experiment():
return FurnMoveNoCommNoRotationsExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_marginalnocomm_nocl_norot_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveGridEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricFastGridEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
class FurnMoveExperimentConfig(ExperimentConfig):
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricFastGridEpisode
episode_sampler_class = FurnMoveGridEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = True if torch.cuda.is_available() else False
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
coordinate_actions = False
num_talk_symbols = 2
num_reply_symbols = 2
# Agent config
agent_class = MultiAgent
turn_off_communication = True
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
"min_dist_to_to_object": 0.26,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"frame_type": "fast-egocentric-relative-tensor",
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(
num_inputs=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
)
# IF WE WANTED CENTRAL
# return A3CLSTMCentralEgoGridsEmbedCNN(
# num_inputs_per_agent=10,
# action_groups=cls.episode_class.class_available_action_groups(
# include_move_obj_actions=cls.include_move_obj_actions
# ),
# num_agents=cls.num_agents,
# state_repr_length=cls.state_repr_length,
# occupancy_embed_length=8,
# )
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
return None
# return "trained_models/furnmove_a3c_noncentral_ego_grids_ego_actions_uncoordinated_discrete_com_fast_relative_v2_pass_1000000_2019-11-02_14-28-33.dat"
def get_experiment():
return FurnMoveExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_marginalnocomm_base_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricEpisode
from rl_multi_agent.models import A3CLSTMCentralEgoVision
class FurnMoveExperimentConfig(ExperimentConfig):
"""
All scenes + 1.0 reward for getting closer to the target
With Failed coordination loss
"""
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricEpisode
frame_type = "image"
episode_sampler_class = FurnMoveEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = False
# Model config
state_repr_length = 512
# Agent config
agent_class = MultiAgent
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMCentralEgoVision(
num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
discourage_failed_coordination=5000,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_bigcentral_base_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoVision
class FurnMoveMixtureVision3AgentsExperimentConfig(ExperimentConfig):
# Env/episode config
num_agents = 3
screen_size = 84
episode_class = FurnMoveEgocentricEpisode
frame_type = "image"
episode_sampler_class = FurnMoveEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = False
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
num_talk_symbols = 2
num_reply_symbols = 2
coordinate_actions = True
# Agent config
agent_class = MultiAgent
turn_off_communication = False
discourage_failed_coordination = 5000
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 250 * cls.num_agents,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 2.0,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 2.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMNStepComCoordinatedActionsEgoVision(
num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
discourage_failed_coordination=cls.discourage_failed_coordination,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
    def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveMixtureVision3AgentsExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_mixture_3agents_config.py |
from typing import Optional
from torch import nn
from rl_multi_agent.experiments.furnmove_grid_marginalnocomm_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
class FurnMoveGridExperimentConfig(FurnMoveExperimentConfig):
# Increasing the params of marginal to match mixture
final_cnn_channels = 288
@classmethod
def get_init_train_params(cls):
init_train_params = FurnMoveExperimentConfig.get_init_train_params()
init_train_params["environment_args"] = {"min_steps_between_agents": 2}
return init_train_params
@property
def saved_model_path(self) -> Optional[str]:
return None
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
def _create_model(**kwargs):
return A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(
**{
**dict(
num_inputs=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
final_cnn_channels=cls.final_cnn_channels,
),
**kwargs,
}
)
return _create_model(**kwargs)
def get_experiment():
return FurnMoveGridExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_marginalnocomm_nocl_rot_config.py |
from torch import nn
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.furnlift_base_config import FurnLiftBaseConfig
from rl_multi_agent.experiments.furnlift_vision_mixture_cl_config import (
FurnLiftMinDistMixtureConfig,
)
from rl_multi_agent.models import BigA3CLSTMNStepComCoordinatedActionsEgoVision
from utils.net_util import (
binary_search_for_model_with_least_upper_bound_parameters,
count_parameters,
)
class FurnLiftMinDistUncoordinatedExperimentConfig(FurnLiftBaseConfig):
# Env/episode config
min_dist_between_agents_to_pickup = 8
visible_agents = True
# Model config
turn_off_communication = False
# Agent config
agent_class = MultiAgent
# Coordination
coordinate_actions = False # Marginal
# Balancing
final_cnn_channels = 64
increased_params_for_balancing_marginal = True
balance_against_model_function = FurnLiftMinDistMixtureConfig.create_model
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
def _create_model(**kwargs):
return BigA3CLSTMNStepComCoordinatedActionsEgoVision(
**{
**dict(
num_inputs_per_agent=3 + 1 * (cls.include_depth_frame),
action_groups=cls.episode_class.class_available_action_groups(),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
separate_actor_weights=False,
final_cnn_channels=cls.final_cnn_channels,
),
**kwargs,
}
)
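        # When balancing is enabled, binary-search the final CNN channel count (between
        # final_cnn_channels and 2x that value) so that this marginal model has at least as
        # many parameters as the mixture (SYNC) model it is compared against.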
if cls.increased_params_for_balancing_marginal:
final_cnn_channels = binary_search_for_model_with_least_upper_bound_parameters(
target_parameter_count=count_parameters(
cls.balance_against_model_function()
),
create_model_func=lambda x: _create_model(final_cnn_channels=x),
lower=cls.final_cnn_channels,
upper=2 * cls.final_cnn_channels,
)
kwargs["final_cnn_channels"] = final_cnn_channels
return _create_model(**kwargs)
else:
return _create_model(**kwargs)
def get_experiment():
return FurnLiftMinDistUncoordinatedExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnlift_vision_marginal_nocl_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveGridEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricFastGridEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
class FurnMoveExperimentConfig(ExperimentConfig):
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricFastGridEpisode
episode_sampler_class = FurnMoveGridEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = True if torch.cuda.is_available() else False
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
coordinate_actions = False
num_talk_symbols = 2
num_reply_symbols = 2
# Agent config
agent_class = MultiAgent
turn_off_communication = False
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
"min_dist_to_to_object": 0.26,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"frame_type": "fast-egocentric-relative-tensor",
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(
num_inputs=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
discourage_failed_coordination=5000,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_marginal_cl_base_config.py |
| cordial-sync-master | rl_multi_agent/experiments/__init__.py |
from torch import nn
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.furnlift_base_config import FurnLiftBaseConfig
from rl_multi_agent.experiments.furnlift_vision_mixture_cl_config import (
FurnLiftMinDistMixtureConfig,
)
from rl_multi_agent.models import BigA3CLSTMNStepComCoordinatedActionsEgoVision
from utils.net_util import (
binary_search_for_model_with_least_upper_bound_parameters,
count_parameters,
)
class FurnLiftMinDistUncoordinatedNocommExperimentConfig(FurnLiftBaseConfig):
# Env/episode config
min_dist_between_agents_to_pickup = 8
visible_agents = True
# Model config
turn_off_communication = True
# Agent config
agent_class = MultiAgent
# Coordination
coordinate_actions = False # Marginal
# Balancing
final_cnn_channels = 64
increased_params_for_balancing_marginal = True
balance_against_model_function = FurnLiftMinDistMixtureConfig.create_model
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
def _create_model(**kwargs):
return BigA3CLSTMNStepComCoordinatedActionsEgoVision(
**{
**dict(
num_inputs_per_agent=3 + 1 * (cls.include_depth_frame),
action_groups=cls.episode_class.class_available_action_groups(),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
separate_actor_weights=False,
final_cnn_channels=cls.final_cnn_channels,
),
**kwargs,
}
)
if cls.increased_params_for_balancing_marginal:
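            # Clarifying note (inferred from the identifiers; not original code):
            # to compare fairly against the communicating mixture baseline, binary-search
            # the final CNN channel count so that this no-comm marginal model ends up
            # with the smallest parameter count that is at least the mixture model's.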
final_cnn_channels = binary_search_for_model_with_least_upper_bound_parameters(
target_parameter_count=count_parameters(
cls.balance_against_model_function()
),
create_model_func=lambda x: _create_model(final_cnn_channels=x),
lower=cls.final_cnn_channels,
upper=2 * cls.final_cnn_channels,
)
kwargs["final_cnn_channels"] = final_cnn_channels
return _create_model(**kwargs)
else:
return _create_model(**kwargs)
def get_experiment():
return FurnLiftMinDistUncoordinatedNocommExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnlift_vision_marginalnocomm_nocl_config.py |
from typing import Optional
from torch import nn
import constants
from rl_multi_agent.experiments.furnmove_grid_mixture_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.models import (
A3CLSTMBigCentralBackboneAndCoordinatedActionsEgoGridsEmbedCNN,
)
class FurnMoveGridExperimentConfig(FurnMoveExperimentConfig):
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"environment_args": {"min_steps_between_agents": 2},
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
"min_dist_to_to_object": 0.26,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"frame_type": "fast-egocentric-relative-tensor",
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMBigCentralBackboneAndCoordinatedActionsEgoGridsEmbedCNN(
num_inputs_per_agent=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
)
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveGridExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_bigcentralmixture_cl_rot_config.py |
from rl_multi_agent.experiments.furnmove_grid_mixture_3agents_config import (
FurnMove3AgentMixtureExperimentConfig as FurnMoveCoordinatedMultiAgentConfig,
)
class FurnMove3AgentUncoordinatedExperimentConfig(FurnMoveCoordinatedMultiAgentConfig):
coordinate_actions = False
discourage_failed_coordination = False
def get_experiment():
return FurnMove3AgentUncoordinatedExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_marginal_3agents_config.py |
from typing import Optional
from torch import nn
import constants
from rl_multi_agent.experiments.furnmove_grid_mixture_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.furnmove_episodes import (
FurnMoveEgocentricNoRotationsFastGridEpisode,
)
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
class FurnMoveMixtureNoRotationsExperimentConfig(FurnMoveExperimentConfig):
episode_class = FurnMoveEgocentricNoRotationsFastGridEpisode
coordinate_actions_dim = 14 # Number of actions in NoRotationsFastGridEpisode
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"environment_args": {"min_steps_between_agents": 2},
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
"min_dist_to_to_object": 0.26,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"frame_type": "fast-egocentric-relative-tensor",
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(
num_inputs=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=cls.coordinate_actions_dim
if cls.coordinate_actions
else None,
separate_actor_weights=False,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
)
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveMixtureNoRotationsExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_mixture_cl_norot_config.py |
import abc
from typing import Dict, Callable, Any, List, Optional
import torch
from torch import nn
from rl_base.agent import RLAgent
from rl_multi_agent import MultiAgent
class ExperimentConfig(abc.ABC):
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
raise NotImplementedError()
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
raise NotImplementedError()
@property
def saved_model_path(self) -> Dict[str, str]:
raise NotImplementedError()
@property
def init_train_agent(self) -> Callable:
raise NotImplementedError()
@property
def init_test_agent(self) -> Callable:
raise NotImplementedError()
def create_episode_summary(
self,
agent: RLAgent,
additional_metrics: Dict[str, float],
step_results: List[Dict],
) -> Any:
"""Summarize an episode.
On a worker thread, generate an episode summary that is
passed to the :func:`save_episode_summary <ExperimentConfig.save_episode_summary>`
        method of the experiment object on the Train/Test Manager main thread. This
can be used to summarize episodes and then save them for analysis.
:param agent: Agent from the worker thread.
        :param additional_metrics: Mean of each metric across the episode, as saved
            in agent.eval_results[i]["additional_metrics"], which is of type
            Dict[str, float].
:param step_results: The step results from the episode.
:return: Data to be saved.
"""
return None
def save_episode_summary(self, data_to_save: Any):
raise RuntimeError("Attempting to save episode summary but do not know how.")
def create_episode_init_queue(
self, mp_module
) -> Optional[torch.multiprocessing.Queue]:
raise RuntimeError(
"Attempting to create an episode init queue but do not know how."
)
def stopping_criteria_reached(self):
return False
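# Illustrative sketch (added for clarity; not part of the original repository): a
# minimal concrete config showing how create_episode_summary / save_episode_summary
# are intended to cooperate. The summary fields chosen here are hypothetical.
#
#   class MyExperimentConfig(ExperimentConfig):
#       def create_episode_summary(self, agent, additional_metrics, step_results):
#           # Runs on a worker thread; return any picklable data (or None to skip).
#           return {"num_steps": len(step_results), **additional_metrics}
#
#       def save_episode_summary(self, data_to_save):
#           # Runs on the Train/Test Manager main thread.
#           print("episode summary:", data_to_save)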
| cordial-sync-master | rl_multi_agent/experiments/experiment.py |
from abc import ABC
from typing import Callable, Optional
import torch
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnlift_episode_samplers import FurnLiftEpisodeSamplers
from rl_multi_agent.furnlift_episodes import FurnLiftNApartStateEpisode
class FurnLiftBaseConfig(ExperimentConfig, ABC):
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnLiftNApartStateEpisode
episode_sampler_class = FurnLiftEpisodeSamplers
min_dist_between_agents_to_pickup = None
visible_agents = None
include_depth_frame = False
    # CVPR 2019 baselines allowed agents to intersect.
    # With new capabilities in AI2Thor, we can switch this option on/off.
allow_agents_to_intersect = True
# Model config
state_repr_length = 512
talk_embed_length = 8
reply_embed_length = 8
agent_num_embed_length = 8
num_talk_symbols = 2
num_reply_symbols = 2
# Agent config
agent_class = None
turn_off_communication = None
# Training config
max_ep_using_expert_actions = 10000
dagger_mode = True
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
# Balancing params
increased_params_for_balancing_marginal = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"min_dist_between_agents_to_pickup": cls.min_dist_between_agents_to_pickup,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"allow_agents_to_intersect": cls.allow_agents_to_intersect,
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {**cls.get_init_train_params(), "scenes": cls.valid_scenes}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
dagger_mode=cls.dagger_mode,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
return None
| cordial-sync-master | rl_multi_agent/experiments/furnlift_base_config.py |
from torch import nn
from rl_multi_agent.experiments.furnmove_vision_mixture_3agents_config import (
FurnMoveMixtureVision3AgentsExperimentConfig,
)
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoVision
from utils.net_util import (
binary_search_for_model_with_least_upper_bound_parameters,
count_parameters,
)
class FurnMoveVision3AgentUncoordinatedExperimentConfig(
FurnMoveMixtureVision3AgentsExperimentConfig
):
coordinate_actions = False
discourage_failed_coordination = False
# Balancing
final_cnn_channels = 128
increased_params_for_balancing_marginal = True
balance_against_model_function = (
FurnMoveMixtureVision3AgentsExperimentConfig.create_model
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
def _create_model(**kwargs):
return A3CLSTMNStepComCoordinatedActionsEgoVision(
**{
**dict(
num_inputs_per_agent=3 + 1 * (cls.include_depth_frame),
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
final_cnn_channels=cls.final_cnn_channels,
),
**kwargs,
}
)
if cls.increased_params_for_balancing_marginal:
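            # Same balancing trick as in the other marginal configs: grow
            # final_cnn_channels until the parameter count first reaches the
            # mixture baseline's (note added for clarity, not original code).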
final_cnn_channels = binary_search_for_model_with_least_upper_bound_parameters(
target_parameter_count=count_parameters(
cls.balance_against_model_function()
),
create_model_func=lambda x: _create_model(final_cnn_channels=x),
lower=cls.final_cnn_channels,
upper=2 * cls.final_cnn_channels,
)
kwargs["final_cnn_channels"] = final_cnn_channels
return _create_model(**kwargs)
else:
return _create_model(**kwargs)
def get_experiment():
return FurnMoveVision3AgentUncoordinatedExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_marginal_3agents_config.py |
from typing import Optional
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.furnmove_grid_mixture_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.furnmove_episodes import (
FurnMoveEgocentricNoRotationsFastGridEpisode,
)
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
class FurnMoveMixtureNoRotationsNoCLExperimentConfig(FurnMoveExperimentConfig):
episode_class = FurnMoveEgocentricNoRotationsFastGridEpisode
coordinate_actions_dim = 14 # Number of actions in NoRotationsFastGridEpisode
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"environment_args": {"min_steps_between_agents": 2},
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
"min_dist_to_to_object": 0.26,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"frame_type": "fast-egocentric-relative-tensor",
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(
num_inputs=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=cls.coordinate_actions_dim
if cls.coordinate_actions
else None,
separate_actor_weights=False,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
discourage_failed_coordination=5000,
)
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveMixtureNoRotationsNoCLExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_mixture_nocl_norot_config.py |
from typing import Optional
import constants
from rl_multi_agent.experiments.furnmove_grid_marginal_cl_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.furnmove_episodes import (
FurnMoveEgocentricNoRotationsFastGridEpisode,
)
class FurnMoveGridExperimentConfig(FurnMoveExperimentConfig):
episode_class = FurnMoveEgocentricNoRotationsFastGridEpisode
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"environment_args": {"min_steps_between_agents": 2},
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
"min_dist_to_to_object": 0.26,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"frame_type": "fast-egocentric-relative-tensor",
"pass_conditioned_coordination": True,
},
}
return init_train_params
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveGridExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_marginal_cl_norot_config.py |
from typing import Optional
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.furnmove_grid_bigcentral_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.furnmove_episodes import (
FurnMoveEgocentricNoRotationsFastGridEpisode,
)
from rl_multi_agent.models import A3CLSTMBigCentralEgoGridsEmbedCNN
class FurnMoveNoRotationsExperimentConfig(FurnMoveExperimentConfig):
episode_class = FurnMoveEgocentricNoRotationsFastGridEpisode
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"environment_args": {"min_steps_between_agents": 2},
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
# "increasing_rotate_penalty": True,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
# "moved_closer_reward": 0.50,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMBigCentralEgoGridsEmbedCNN(
num_inputs_per_agent=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
# discourage_failed_coordination=5000,
)
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveNoRotationsExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_bigcentral_nocl_norot_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveGridEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricFastGridEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
class FurnMoveExperimentConfig(ExperimentConfig):
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricFastGridEpisode
episode_sampler_class = FurnMoveGridEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
    headless = torch.cuda.is_available()
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
coordinate_actions = False
num_talk_symbols = 2
num_reply_symbols = 2
# Agent config
agent_class = MultiAgent
turn_off_communication = False
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
"min_dist_to_to_object": 0.26,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"frame_type": "fast-egocentric-relative-tensor",
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(
num_inputs=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
)
# IF WE WANTED CENTRAL
# return A3CLSTMCentralEgoGridsEmbedCNN(
# num_inputs_per_agent=10,
# action_groups=cls.episode_class.class_available_action_groups(
# include_move_obj_actions=cls.include_move_obj_actions
# ),
# num_agents=cls.num_agents,
# state_repr_length=cls.state_repr_length,
# occupancy_embed_length=8,
# )
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
return None
# return "trained_models/furnmove_a3c_noncentral_ego_grids_ego_actions_uncoordinated_discrete_com_fast_relative_v2_pass_1000000_2019-11-02_14-28-33.dat"
def get_experiment():
return FurnMoveExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_marginal_nocl_base_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoVision
class FurnMoveExperimentConfig(ExperimentConfig):
"""
All scenes + 1.0 reward for getting closer to the target
    Communication turned off (marginal, no-comm agents); NO discourage failed coordination loss
"""
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricEpisode
frame_type = "image"
episode_sampler_class = FurnMoveEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = False
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
num_talk_symbols = 2
num_reply_symbols = 2
coordinate_actions = False
# Agent config
agent_class = MultiAgent
turn_off_communication = True
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
# Balancing
final_cnn_channels = 140
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
# "increasing_rotate_penalty": True,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
# "moved_closer_reward": 0.50,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
def _create_model(**kwargs):
return A3CLSTMNStepComCoordinatedActionsEgoVision(
**{
**dict(
num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
final_cnn_channels=cls.final_cnn_channels,
),
**kwargs,
}
)
return _create_model(**kwargs)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_marginalnocomm_nocl_rot_config.py |
from typing import Callable, Optional
import torch
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveGridEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricFastGridEpisode
class FurnMoveExperimentConfig(ExperimentConfig):
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricFastGridEpisode
frame_type = "fast-egocentric-relative-tensor"
episode_sampler_class = FurnMoveGridEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
    headless = torch.cuda.is_available()
# Model config
state_repr_length = 512
# Agent config
agent_class = MultiAgent
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
# "increasing_rotate_penalty": True,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
# "moved_closer_reward": 0.50,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
# init_valid_params["episode_args"]["visualize_test_gridworld"] = True
# init_valid_params["episode_args"]["visualizing_ms"] = 50
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
discourage_failed_coordination=5000,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_bigcentral_base_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveGridEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricFastGridEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
class FurnMove3AgentMixtureExperimentConfig(ExperimentConfig):
# Env/episode config
num_agents = 3
screen_size = 84
episode_class = FurnMoveEgocentricFastGridEpisode
episode_sampler_class = FurnMoveGridEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
    headless = torch.cuda.is_available()
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
coordinate_actions = True
num_talk_symbols = 2
num_reply_symbols = 2
# Agent config
agent_class = MultiAgent
turn_off_communication = False
discourage_failed_coordination = 5000
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 250 * cls.num_agents,
"environment_args": {"min_steps_between_agents": 2},
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 2.0,
"min_dist_to_to_object": 0.26,
"reached_target_reward": 2.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"frame_type": "fast-egocentric-relative-tensor",
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(
num_inputs=13,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
)
# IF WE WANTED CENTRAL
# return A3CLSTMCentralEgoGridsEmbedCNN(
# num_inputs_per_agent=10,
# action_groups=cls.episode_class.class_available_action_groups(
# include_move_obj_actions=cls.include_move_obj_actions
# ),
# num_agents=cls.num_agents,
# state_repr_length=cls.state_repr_length,
# occupancy_embed_length=8,
# )
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
discourage_failed_coordination=cls.discourage_failed_coordination,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMove3AgentMixtureExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_mixture_3agents_config.py |
from typing import Optional
from torch import nn
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.furnmove_vision_bigcentral_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.models import A3CLSTMBigCentralEgoVision
class FurnMoveBigCentralVisionExperimentConfig(FurnMoveExperimentConfig):
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMBigCentralEgoVision(
num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
# discourage_failed_coordination=5000,
)
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveBigCentralVisionExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_bigcentral_nocl_rot_config.py |
from typing import Optional
from torch import nn
from rl_multi_agent.experiments.furnmove_grid_bigcentral_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.models import A3CLSTMBigCentralEgoGridsEmbedCNN
class FurnMoveBigCentralNoCLExperimentConfig(FurnMoveExperimentConfig):
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMBigCentralEgoGridsEmbedCNN(
num_inputs_per_agent=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
)
@classmethod
def create_agent(cls, **kwargs):
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
)
@classmethod
def get_init_train_params(cls):
init_train_params = FurnMoveExperimentConfig.get_init_train_params()
init_train_params["environment_args"] = {"min_steps_between_agents": 2}
return init_train_params
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveBigCentralNoCLExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_bigcentral_nocl_rot_config.py |
from typing import Optional
from rl_multi_agent.experiments.furnmove_grid_mixture_base_config import (
FurnMoveExperimentConfig,
)
class FurnMoveGridMixtureExperimentConfig(FurnMoveExperimentConfig):
@classmethod
def get_init_train_params(cls):
init_train_params = FurnMoveExperimentConfig.get_init_train_params()
init_train_params["environment_args"] = {"min_steps_between_agents": 2}
return init_train_params
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveGridMixtureExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_mixture_cl_rot_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoVision
class FurnMoveExperimentConfig(ExperimentConfig):
"""
All scenes + 1.0 reward for getting closer to the target
Discrete communication + discourage failed coordination loss
"""
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricEpisode
frame_type = "image"
episode_sampler_class = FurnMoveEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = False
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
num_talk_symbols = 2
num_reply_symbols = 2
coordinate_actions = True
# Agent config
agent_class = MultiAgent
turn_off_communication = False
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
# "increasing_rotate_penalty": True,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
# "moved_closer_reward": 0.50,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMNStepComCoordinatedActionsEgoVision(
num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=4 if cls.coordinate_actions else None,
separate_actor_weights=False,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
discourage_failed_coordination=5000,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
# return "trained_models/furnmove_a3c_mixture_ego_vision_ego_actions_v3_490000_2019-10-28_16-43-13.dat"
return None
def get_experiment():
return FurnMoveExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_mixture4mix_cl_rot_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoVision
class FurnMoveExperimentConfig(ExperimentConfig):
"""
All scenes + 1.0 reward for getting closer to the target
    Discrete communication, with NO discourage failed coordination loss
"""
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricEpisode
frame_type = "image"
episode_sampler_class = FurnMoveEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = False
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
num_talk_symbols = 2
num_reply_symbols = 2
coordinate_actions = True
# Agent config
agent_class = MultiAgent
turn_off_communication = False
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
# "increasing_rotate_penalty": True,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
# "moved_closer_reward": 0.50,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMNStepComCoordinatedActionsEgoVision(
num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
# return "trained_models/furnmove_a3c_mixture_ego_vision_ego_actions_v3_490000_2019-10-28_16-43-13.dat"
return None
def get_experiment():
return FurnMoveExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_mixture_nocl_rot_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoVision
class FurnMoveMixtureVisionExperimentConfig(ExperimentConfig):
"""
All scenes + 1.0 reward for getting closer to the target
Discrete communication + discourage failed coordination loss
"""
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricEpisode
frame_type = "image"
episode_sampler_class = FurnMoveEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = False
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
num_talk_symbols = 2
num_reply_symbols = 2
coordinate_actions = True
# Agent config
agent_class = MultiAgent
turn_off_communication = False
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMNStepComCoordinatedActionsEgoVision(
num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
discourage_failed_coordination=5000,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveMixtureVisionExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_mixture_cl_rot_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoVision
class FurnMoveExperimentConfig(ExperimentConfig):
"""
All scenes + 1.0 reward for getting closer to the target
Discrete communication with NO discourage failed coordination loss
"""
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricEpisode
frame_type = "image"
episode_sampler_class = FurnMoveEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = False
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
num_talk_symbols = 2
num_reply_symbols = 2
coordinate_actions = False
# Agent config
agent_class = MultiAgent
turn_off_communication = False
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
# Balancing
final_cnn_channels = 140
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
# "increasing_rotate_penalty": True,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
# "moved_closer_reward": 0.50,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
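# Note: any kwargs passed to create_model override the default constructor arguments assembled below.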
def _create_model(**kwargs):
return A3CLSTMNStepComCoordinatedActionsEgoVision(
**{
**dict(
num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
final_cnn_channels=cls.final_cnn_channels,
),
**kwargs,
}
)
return _create_model(**kwargs)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_marginal_nocl_rot_config.py |
from typing import Optional
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.furnmove_grid_mixture_base_config import (
FurnMoveExperimentConfig,
)
class FurnMoveGridExperimentConfig(FurnMoveExperimentConfig):
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"environment_args": {"min_steps_between_agents": 2},
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
"min_dist_to_to_object": 0.26,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"frame_type": "fast-egocentric-relative-tensor",
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
)
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveGridExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_mixture_nocl_rot_config.py |
from typing import Optional
from torch import nn
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.furnlift_base_config import FurnLiftBaseConfig
from rl_multi_agent.models import BigA3CLSTMNStepComCoordinatedActionsEgoVision
class FurnLiftMinDistNoimplicitMixtureConfig(FurnLiftBaseConfig):
# Env/episode config
min_dist_between_agents_to_pickup = 8
visible_agents = False
# Model config
turn_off_communication = False
# Agent config
agent_class = MultiAgent
# Coordination
coordinate_actions = True # Mixture
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return BigA3CLSTMNStepComCoordinatedActionsEgoVision(
num_inputs_per_agent=3 + 1 * (cls.include_depth_frame),
action_groups=cls.episode_class.class_available_action_groups(),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=2 if cls.coordinate_actions else None,
separate_actor_weights=False,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
dagger_mode=cls.dagger_mode,
discourage_failed_coordination=False,
coordination_use_marginal_entropy=True,
)
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnLiftMinDistNoimplicitMixtureConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnlift_vision_noimplicit_mixture_cl_config.py |
from torch import nn
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.furnlift_base_config import FurnLiftBaseConfig
from rl_multi_agent.models import OldTaskA3CLSTMBigCentralEgoVision
class FurnLiftExperimentConfig(FurnLiftBaseConfig):
# Env/episode config
min_dist_between_agents_to_pickup = 8
visible_agents = True
# Agent config
agent_class = MultiAgent
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return OldTaskA3CLSTMBigCentralEgoVision(
num_inputs_per_agent=3 + 1 * (cls.include_depth_frame),
action_groups=cls.episode_class.class_available_action_groups(),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
)
def get_experiment():
return FurnLiftExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnlift_vision_central_cl_config.py |
from typing import Optional
from torch import nn
import constants
from rl_multi_agent.experiments.furnmove_grid_marginal_nocl_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.furnmove_episodes import (
FurnMoveEgocentricNoRotationsFastGridEpisode,
)
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
class FurnMoveMarginalNoRotationsExperimentConfig(FurnMoveExperimentConfig):
episode_class = FurnMoveEgocentricNoRotationsFastGridEpisode
# Increasing the params of marginal to match mixture
final_cnn_channels = 288
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"environment_args": {"min_steps_between_agents": 2},
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
"min_dist_to_to_object": 0.26,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"frame_type": "fast-egocentric-relative-tensor",
"pass_conditioned_coordination": True,
},
}
return init_train_params
@property
def saved_model_path(self) -> Optional[str]:
return None
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
def _create_model(**kwargs):
return A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(
**{
**dict(
num_inputs=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
final_cnn_channels=cls.final_cnn_channels,
),
**kwargs,
}
)
return _create_model(**kwargs)
def get_experiment():
return FurnMoveMarginalNoRotationsExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_marginal_nocl_norot_config.py |
from rl_multi_agent.experiments.furnlift_vision_marginal_nocl_config import (
FurnLiftMinDistUncoordinatedExperimentConfig,
)
class FurnLiftMinDistUncoordinatedNoimplicitExperimentConfig(
FurnLiftMinDistUncoordinatedExperimentConfig
):
# Env/episode config
visible_agents = False
def get_experiment():
return FurnLiftMinDistUncoordinatedNoimplicitExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnlift_vision_noimplicit_marginal_nocl_config.py |
from typing import Optional
from torch import nn
from rl_multi_agent.experiments.furnmove_grid_marginal_nocl_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
class FurnMoveGridExperimentConfig(FurnMoveExperimentConfig):
# Increasing the params of marginal to match mixture
final_cnn_channels = 288
@classmethod
def get_init_train_params(cls):
init_train_params = FurnMoveExperimentConfig.get_init_train_params()
init_train_params["environment_args"] = {"min_steps_between_agents": 2}
return init_train_params
@property
def saved_model_path(self) -> Optional[str]:
return None
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
def _create_model(**kwargs):
return A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(
**{
**dict(
num_inputs=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
final_cnn_channels=cls.final_cnn_channels,
),
**kwargs,
}
)
return _create_model(**kwargs)
def get_experiment():
return FurnMoveGridExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_marginal_nocl_rot_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveGridEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricFastGridEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
class FurnMoveExperimentConfig(ExperimentConfig):
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricFastGridEpisode
episode_sampler_class = FurnMoveGridEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = torch.cuda.is_available()
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
coordinate_actions = True
num_talk_symbols = 2
num_reply_symbols = 2
# Agent config
agent_class = MultiAgent
turn_off_communication = False
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
"min_dist_to_to_object": 0.26,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"frame_type": "fast-egocentric-relative-tensor",
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(
num_inputs=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
discourage_failed_coordination=5000,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_mixture_base_config.py |
import torch.nn as nn
from rl_multi_agent.experiments.furnmove_grid_mixture_3agents_config import (
FurnMove3AgentMixtureExperimentConfig as FurnMoveCoordinatedMultiAgentConfig,
)
from rl_multi_agent.models import A3CLSTMCentralEgoGridsEmbedCNN
class FurnMove3AgentCentralExperimentConfig(FurnMoveCoordinatedMultiAgentConfig):
num_agents = 3
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMCentralEgoGridsEmbedCNN(
num_inputs_per_agent=13,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
)
def get_experiment():
return FurnMove3AgentCentralExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_central_3agents_config.py |
from typing import Optional
from torch import nn
from rl_multi_agent.experiments.furnmove_grid_bigcentral_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.models import A3CLSTMBigCentralEgoGridsEmbedCNN
class FurnMoveBigCentralExperimentConfig(FurnMoveExperimentConfig):
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMBigCentralEgoGridsEmbedCNN(
num_inputs_per_agent=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
)
@classmethod
def get_init_train_params(cls):
init_train_params = FurnMoveExperimentConfig.get_init_train_params()
init_train_params["environment_args"] = {"min_steps_between_agents": 2}
return init_train_params
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveBigCentralExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_bigcentral_cl_rot_config.py |
from typing import Optional
from rl_multi_agent.agents import MultiAgent
from rl_multi_agent.experiments.furnmove_vision_marginal_nocl_rot_config import (
FurnMoveExperimentConfig,
)
class FurnMoveVisionMarginalWithCLExperimentConfig(FurnMoveExperimentConfig):
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
discourage_failed_coordination=5000,
)
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveVisionMarginalWithCLExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_marginal_cl_rot_config.py |
from typing import Optional
from torch import nn
import constants
from rl_multi_agent.experiments.furnmove_grid_bigcentral_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.furnmove_episodes import (
FurnMoveEgocentricNoRotationsFastGridEpisode,
)
from rl_multi_agent.models import A3CLSTMBigCentralEgoGridsEmbedCNN
class FurnMoveNoRotationsExperimentConfig(FurnMoveExperimentConfig):
episode_class = FurnMoveEgocentricNoRotationsFastGridEpisode
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"to_object_silhouette": constants.DRESSER_SILHOUETTE_STRING,
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"environment_args": {"min_steps_between_agents": 2},
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
# "increasing_rotate_penalty": True,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
# "moved_closer_reward": 0.50,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMBigCentralEgoGridsEmbedCNN(
num_inputs_per_agent=9,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
occupancy_embed_length=8,
)
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveNoRotationsExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_grid_bigcentral_cl_norot_config.py |
from rl_multi_agent.experiments.furnlift_vision_central_cl_config import (
FurnLiftExperimentConfig,
)
class FurnLiftNoimplicitExperimentConfig(FurnLiftExperimentConfig):
# Env/episode config
visible_agents = False
def get_experiment():
return FurnLiftNoimplicitExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnlift_vision_noimplicit_central_cl_config.py |
from typing import Optional
from torch import nn
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.furnlift_base_config import FurnLiftBaseConfig
from rl_multi_agent.models import BigA3CLSTMNStepComCoordinatedActionsEgoVision
class FurnLiftMinDistMixtureConfig(FurnLiftBaseConfig):
# Env/episode config
min_dist_between_agents_to_pickup = 8
visible_agents = True
# Model config
turn_off_communication = False
# Agent config
agent_class = MultiAgent
# Coordination
coordinate_actions = True # Mixture
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return BigA3CLSTMNStepComCoordinatedActionsEgoVision(
num_inputs_per_agent=3 + 1 * (cls.include_depth_frame),
action_groups=cls.episode_class.class_available_action_groups(),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=2 if cls.coordinate_actions else None,
separate_actor_weights=False,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
dagger_mode=cls.dagger_mode,
discourage_failed_coordination=False,
coordination_use_marginal_entropy=True,
)
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnLiftMinDistMixtureConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnlift_vision_mixture_cl_config.py |
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoVision
class FurnMoveExperimentConfig(ExperimentConfig):
"""
All scenes + 1.0 reward for getting closer to the target
Discrete communication + discourage failed coordination loss
"""
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricEpisode
frame_type = "image"
episode_sampler_class = FurnMoveEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = False
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
num_talk_symbols = 2
num_reply_symbols = 2
coordinate_actions = True
# Agent config
agent_class = MultiAgent
turn_off_communication = False
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
# "increasing_rotate_penalty": True,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
# "moved_closer_reward": 0.50,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMNStepComCoordinatedActionsEgoVision(
num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=2 if cls.coordinate_actions else None,
separate_actor_weights=False,
)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
discourage_failed_coordination=5000,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
# return "trained_models/furnmove_a3c_mixture_ego_vision_ego_actions_v3_490000_2019-10-28_16-43-13.dat"
return None
def get_experiment():
return FurnMoveExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_mixture2mix_cl_rot_config.py |
from typing import Optional
from torch import nn
from rl_multi_agent.experiments.furnmove_vision_bigcentral_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.models import A3CLSTMBigCentralEgoVision
class FurnMoveBigCentralVisionExperimentConfig(FurnMoveExperimentConfig):
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return A3CLSTMBigCentralEgoVision(
num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
)
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveBigCentralVisionExperimentConfig()
| cordial-sync-master | rl_multi_agent/experiments/furnmove_vision_bigcentral_cl_rot_config.py |
import os
from typing import Optional
from constants import ABS_PATH_TO_FINAL_FURNLIFT_CKPTS
from constants import SPLIT_TO_USE_FOR_EVALUATION
from rl_multi_agent.experiments.furnlift_vision_noimplicit_marginalnocomm_nocl_config import (
FurnLiftMinDistUncoordinatedNocommNoimplicitExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnLiftMixin
class EvalConfigNocommVision(
SaveFurnLiftMixin, FurnLiftMinDistUncoordinatedNocommNoimplicitExperimentConfig,
):
max_ep_using_expert_actions = 0
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNLIFT_CKPTS,
"furnlift_vision_noimplicit_marginalnocomm_nocl_100000_2020-03-03_06-21-49.dat",
)
def simple_name(self):
return "furnlift__vision_noimplicit_marginalnocomm_nocl"
def get_experiment():
ec = EvalConfigNocommVision()
ec.episode_init_queue_file_name = "furnlift_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnlift_eval_experiments/furnlift_vision_noimplicit_marginalnocomm_nocl_config.py |
cordial-sync-master | rl_multi_agent/furnlift_eval_experiments/__init__.py |
|
import os
from typing import Optional
from constants import ABS_PATH_TO_FINAL_FURNLIFT_CKPTS
from constants import SPLIT_TO_USE_FOR_EVALUATION
from rl_multi_agent.experiments.furnlift_vision_marginalnocomm_nocl_config import (
FurnLiftMinDistUncoordinatedNocommExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnLiftMixin
class EvalConfigNocommVision(
SaveFurnLiftMixin, FurnLiftMinDistUncoordinatedNocommExperimentConfig,
):
max_ep_using_expert_actions = 0
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNLIFT_CKPTS,
"furnlift_vision_marginalnocomm_nocl_100000_2020-02-29_01-38-14.dat",
)
def simple_name(self):
return "furnlift__vision_marginalnocomm_nocl"
def get_experiment():
ec = EvalConfigNocommVision()
ec.episode_init_queue_file_name = "furnlift_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnlift_eval_experiments/furnlift_vision_marginalnocomm_nocl_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNLIFT_CKPTS
from rl_multi_agent.experiments.furnlift_vision_marginal_nocl_config import (
FurnLiftMinDistUncoordinatedExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnLiftMixin
class EvalConfigMarginalVision(
SaveFurnLiftMixin, FurnLiftMinDistUncoordinatedExperimentConfig
):
max_ep_using_expert_actions = 0
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNLIFT_CKPTS,
"furnlift_vision_marginal_nocl_100000_2020-02-28_09-16-44.dat",
)
def simple_name(self):
return "furnlift__vision_marginal_nocl"
def get_experiment():
ec = EvalConfigMarginalVision()
ec.episode_init_queue_file_name = "furnlift_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnlift_eval_experiments/furnlift_vision_marginal_nocl.py |
import os
from typing import Optional
from constants import ABS_PATH_TO_FINAL_FURNLIFT_CKPTS
from constants import SPLIT_TO_USE_FOR_EVALUATION
from rl_multi_agent.experiments.furnlift_vision_noimplicit_mixture_cl_config import (
FurnLiftMinDistNoimplicitMixtureConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnLiftMixin
class EvalConfigMixtureVision(
SaveFurnLiftMixin, FurnLiftMinDistNoimplicitMixtureConfig
):
max_ep_using_expert_actions = 0
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNLIFT_CKPTS,
"furnlift_vision_noimplicit_mixture_cl_100000_2020-03-02_18-34-36.dat",
)
def simple_name(self):
return "furnlift__vision_noimplicit_mixture_cl"
def get_experiment():
ec = EvalConfigMixtureVision()
ec.episode_init_queue_file_name = "furnlift_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnlift_eval_experiments/furnlift_vision_noimplicit_mixture_cl_config.py |
import os
from typing import Optional
from constants import ABS_PATH_TO_FINAL_FURNLIFT_CKPTS
from constants import SPLIT_TO_USE_FOR_EVALUATION
from rl_multi_agent.experiments.furnlift_vision_central_cl_config import (
FurnLiftExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnLiftMixin
class EvalConfigCentralVision(SaveFurnLiftMixin, FurnLiftExperimentConfig):
max_ep_using_expert_actions = 0
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNLIFT_CKPTS,
"furnlift_vision_central_cl_100000_2020-02-26_00-00-23.dat",
)
def simple_name(self):
return "furnlift__vision_central_cl"
def get_experiment():
ec = EvalConfigCentralVision()
ec.episode_init_queue_file_name = "furnlift_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnlift_eval_experiments/furnlift_vision_central_cl_config.py |
import os
from typing import Optional
from constants import ABS_PATH_TO_FINAL_FURNLIFT_CKPTS
from constants import SPLIT_TO_USE_FOR_EVALUATION
from rl_multi_agent.experiments.furnlift_vision_noimplicit_marginal_nocl_config import (
FurnLiftMinDistUncoordinatedNoimplicitExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnLiftMixin
class EvalConfigMarginalVision(
SaveFurnLiftMixin, FurnLiftMinDistUncoordinatedNoimplicitExperimentConfig,
):
max_ep_using_expert_actions = 0
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNLIFT_CKPTS,
"furnlift_vision_noimplicit_marginal_nocl_100000_2020-03-03_01-43-25.dat",
)
def simple_name(self):
return "furnlift__vision_noimplicit_marginal_nocl"
def get_experiment():
ec = EvalConfigMarginalVision()
ec.episode_init_queue_file_name = "furnlift_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnlift_eval_experiments/furnlift_vision_noimplicit_marginal_nocl_config.py |
import os
from typing import Optional
from constants import ABS_PATH_TO_FINAL_FURNLIFT_CKPTS
from constants import SPLIT_TO_USE_FOR_EVALUATION
from rl_multi_agent.experiments.furnlift_vision_noimplicit_central_cl_config import (
FurnLiftNoimplicitExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnLiftMixin
class EvalConfigCentralVision(SaveFurnLiftMixin, FurnLiftNoimplicitExperimentConfig):
max_ep_using_expert_actions = 0
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNLIFT_CKPTS,
"furnlift_vision_noimplicit_central_cl_100000_2020-03-03_17-54-13.dat",
)
def simple_name(self):
return "furnlift__vision_noimplicit_central_cl"
def get_experiment():
ec = EvalConfigCentralVision()
ec.episode_init_queue_file_name = "furnlift_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnlift_eval_experiments/furnlift_vision_noimplicit_central_cl_config.py |
import os
from typing import Optional
from constants import ABS_PATH_TO_FINAL_FURNLIFT_CKPTS
from constants import SPLIT_TO_USE_FOR_EVALUATION
from rl_multi_agent.experiments.furnlift_vision_mixture_cl_config import (
FurnLiftMinDistMixtureConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnLiftMixin
class EvalConfigMixtureVision(SaveFurnLiftMixin, FurnLiftMinDistMixtureConfig):
max_ep_using_expert_actions = 0
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNLIFT_CKPTS,
"furnlift_vision_mixture_cl_100000_2020-02-29_12-23-05.dat",
)
def simple_name(self):
return "furnlift__vision_mixture_cl"
def get_experiment():
ec = EvalConfigMixtureVision()
ec.episode_init_queue_file_name = "furnlift_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnlift_eval_experiments/furnlift_vision_mixture_cl_config.py |
"""
A script used to generate a fixed dataset of initial starting locations for agents,
the TV, and the TV stand. This dataset can then be used to provide a fair comparison
between different models trained to complete the FurnMove task.
"""
import hashlib
import itertools
import json
import os
import random
import sys
import time
import traceback
import warnings
from queue import Empty
import numpy as np
import torch
import torch.multiprocessing as mp
from setproctitle import setproctitle as ptitle
import constants
from constants import ABS_PATH_TO_LOCAL_THOR_BUILD, ABS_PATH_TO_DATA_DIR
from rl_ai2thor.ai2thor_environment import AI2ThorEnvironment
mp = mp.get_context("spawn")
LIFTED_OBJECT_INITIAL_HEIGHT = 1.3
MAX_DISTANCE_FROM_OBJECT = 0.76
CURRENTLY_ON_SERVER = torch.cuda.is_available()
def create_environment(num_agents):
return AI2ThorEnvironment(
x_display="0.0" if CURRENTLY_ON_SERVER else None,
local_thor_build=ABS_PATH_TO_LOCAL_THOR_BUILD,
restrict_to_initially_reachable_points=True,
num_agents=num_agents,
)
def try_setting_up_environment(env, scene, lifted_object_type, to_object_type):
env.reset(scene)
for i in range(env.num_agents):
env.teleport_agent_to(
**env.last_event.metadata["agent"]["position"],
rotation=env.last_event.metadata["agent"]["rotation"]["y"],
horizon=30.0,
standing=True,
force_action=True,
agent_id=i,
)
failure_reasons = []
scene_successfully_setup = False
lifted_object_id = None
to_object_id = None
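# Make up to 20 attempts to randomly place the lifted object and the target object in the scene.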
for _ in range(20):
# Randomly generate agents looking at the TV
env.step(
{
"action": "DisableAllObjectsOfType",
"objectId": lifted_object_type,
"agentId": 0,
}
)
sr = env.step(
{
"action": "RandomlyCreateLiftedFurniture",
"objectType": lifted_object_type,
"y": LIFTED_OBJECT_INITIAL_HEIGHT,
"z": MAX_DISTANCE_FROM_OBJECT,
}
)
if (
env.last_event.metadata["lastAction"] != "RandomlyCreateLiftedFurniture"
or not env.last_event.metadata["lastActionSuccess"]
):
failure_reasons.append(
"Could not randomize location of {} in {}. Error message {}.".format(
lifted_object_type, scene, env.last_event.metadata["errorMessage"]
)
)
continue
# Refreshing reachable here should not be done as it means the
# space underneath the TV would be considered unreachable even though,
# once the TV moves, it is.
objects_of_type = env.all_objects_with_properties(
{"objectType": lifted_object_type}, agent_id=0
)
if len(objects_of_type) != 1:
print("len(objects_of_type): {}".format(len(objects_of_type)))
raise (Exception("len(objects_of_type) != 1"))
object = objects_of_type[0]
lifted_object_id = object["objectId"]
assert lifted_object_id == sr.metadata["actionReturn"]
# Create the object we should navigate to
env.step(
{
"action": "RandomlyCreateAndPlaceObjectOnFloor",
"objectType": to_object_type,
"agentId": 0,
}
)
if (
env.last_event.metadata["lastAction"]
!= "RandomlyCreateAndPlaceObjectOnFloor"
or not env.last_event.metadata["lastActionSuccess"]
):
failure_reasons.append(
"Could not randomize location of {} in {}. Error message: {}.".format(
to_object_type, scene, env.last_event.metadata["errorMessage"]
)
)
continue
to_object_id = env.last_event.metadata["actionReturn"]
assert to_object_id is not None and len(to_object_id) != 0
scene_successfully_setup = True
break
return scene_successfully_setup, lifted_object_id, to_object_id
def generate_data(
worker_num: int,
num_agents: int,
lifted_object_type: str,
to_object_type: str,
in_queue: mp.Queue,
out_queue: mp.Queue,
):
ptitle("Training Agent: {}".format(worker_num))
try:
env = None
while not in_queue.empty():
scene, index = in_queue.get(timeout=3)
if env is None:
env = create_environment(num_agents=num_agents)
env.start(scene, player_screen_width=84, player_screen_height=84)
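# Seed all RNGs deterministically from the (scene, index) pair so that generated start positions are reproducible.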
seed = (
int(hashlib.md5((scene + "|{}".format(index)).encode()).hexdigest(), 16)
% 2 ** 31
)
random.seed(seed)
np.random.seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed(seed)
success, lifted_object_id, to_object_id = try_setting_up_environment(
env,
scene,
lifted_object_type=lifted_object_type,
to_object_type=to_object_type,
)
if not success:
warnings.warn(
"Failed to successfully set up environment in scene {} with index {}".format(
scene, index
)
)
continue
lifted_object = env.get_object_by_id(object_id=lifted_object_id, agent_id=0)
lifted_object_location = {
**lifted_object["position"],
"rotation": (round(lifted_object["rotation"]["y"] / 90) % 4) * 90,
}
to_object = env.get_object_by_id(object_id=to_object_id, agent_id=0)
to_object_location = {
**to_object["position"],
"rotation": (round(to_object["rotation"]["y"] / 90) % 4) * 90,
}
out_queue.put(
{
"scene": scene,
"index": index,
"agent_locations": env.get_agent_locations(),
"lifted_object_location": lifted_object_location,
"to_object_location": to_object_location,
}
)
except Exception as e:
print("ERROR:")
traceback.print_exc(file=sys.stdout)
raise e
finally:
if env is not None:
env.stop()
print("Process ending.")
sys.exit()
if __name__ == "__main__":
num_agents = 2
num_processes = 16 if CURRENTLY_ON_SERVER else 4
split = "test"
if split == "train":
scenes = constants.TRAIN_SCENE_NAMES[20:40]
repeats_per_scene = 50
elif split == "valid":
scenes = constants.VALID_SCENE_NAMES[5:10]
repeats_per_scene = 200
elif split == "test":
scenes = constants.TEST_SCENE_NAMES[5:10]
repeats_per_scene = 200
else:
raise NotImplementedError()
assert os.path.exists(ABS_PATH_TO_DATA_DIR)
save_path = os.path.join(
ABS_PATH_TO_DATA_DIR,
"furnmove_episode_start_positions_for_eval{}__{}.json".format(
"" if num_agents == 2 else "__{}agents".format(num_agents), split
),
)
start_positions_data = {}
if os.path.exists(save_path):
with open(save_path, "r") as f:
start_positions_data = json.load(f)
def save_data_to_disk():
with open(save_path, "w") as f:
json.dump(start_positions_data, f)
processes = []
try:
remaining_keys = set()
send_queue = mp.Queue()
recieve_queue = mp.Queue()
k = 0
for scene, index in itertools.product(scenes, list(range(repeats_per_scene))):
if scene not in start_positions_data:
start_positions_data[scene] = {}
if str(index) in start_positions_data[scene]:
continue
remaining_keys.add((scene, index))
send_queue.put((scene, index))
k += 1
if k == 0:
print("Already generated all positions for evaluation? Quitting...")
sys.exit()
print("Starting data generation with {} unique configurations.".format(k))
for worker_num in range(num_processes):
p = mp.Process(
target=generate_data,
kwargs=dict(
worker_num=worker_num,
num_agents=num_agents,
lifted_object_type="Television",
to_object_type="Dresser",
in_queue=send_queue,
out_queue=recieve_queue,
),
)
p.start()
time.sleep(0.2)
processes.append(p)
time.sleep(3)
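# Collect results from the worker processes, saving to disk roughly every minute, until all
# (scene, index) pairs have been generated or every worker has exited.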
while len(remaining_keys) != 0 and (
any([p.is_alive() for p in processes]) or not recieve_queue.empty()
):
print("Gathering incoming data...")
last_time = time.time()
while time.time() - last_time < 60 and len(remaining_keys) != 0:
data = recieve_queue.get(timeout=120)
scene, index = (data["scene"], data["index"])
del data["scene"], data["index"]
start_positions_data[scene][str(index)] = data
remaining_keys.remove((scene, index))
print("Saving to disk...")
save_data_to_disk()
except Empty as _:
print(
"Outqueue empty for 120 seconds. Assuming everything is done. Writing to disk..."
)
save_data_to_disk()
except Exception as e:
traceback.print_exc(file=sys.stdout)
finally:
print("Cleaning up main process.\n")
print("Joining processes.")
for p in processes:
p.join(0.1)
print("Joined.\n")
print("All done.")
sys.exit(1)
| cordial-sync-master | rl_multi_agent/scripts/generate_furnmove_starting_locations_for_evaluation.py |
"""
A script used to (randomly) test that our gridworld variant of AI2-THOR
(Grid-AI2-THOR) aligns properly with the standard version of AI2-THOR. In particular, this
script will randomly initialize agents in Grid-AI2-THOR and AI2-THOR in exactly
the same (living-room) scenes and positions and then will take the same (randomly chosen) actions
in both environments. This script then verifies that actions in both environments
succeed and fail at the same time steps.
Currently the number of agents used during this test is fixed at 2. If you wish to
change this number, simply edit `num_agents = 2` below.
"""
import random
import numpy as np
import torch.multiprocessing as mp
from constants import (
ABS_PATH_TO_LOCAL_THOR_BUILD,
DRESSER_SILHOUETTE_STRING,
SCENES_NAMES_SPLIT_BY_TYPE,
)
from rl_ai2thor.ai2thor_gridworld_environment import AI2ThorLiftedObjectGridEnvironment
from rl_multi_agent.furnmove_episodes import egocentric_action_groups
from rl_multi_agent.scripts.generate_furnmove_starting_locations_for_evaluation import (
create_environment,
MAX_DISTANCE_FROM_OBJECT,
LIFTED_OBJECT_INITIAL_HEIGHT,
try_setting_up_environment,
)
mp = mp.get_context("spawn")
if __name__ == "__main__":
num_agents = 2
vision_env = create_environment(num_agents=num_agents)
vision_env.start("FloorPlan1_physics")
grid_env = AI2ThorLiftedObjectGridEnvironment(
local_thor_build=ABS_PATH_TO_LOCAL_THOR_BUILD,
object_type="Television",
num_agents=num_agents,
max_dist_to_lifted_object=MAX_DISTANCE_FROM_OBJECT,
lifted_object_height=LIFTED_OBJECT_INITIAL_HEIGHT,
min_steps_between_agents=2,
)
grid_env.start("FloorPlan1_physics")
dresser_mask = grid_env.controller.parse_template_to_mask(DRESSER_SILHOUETTE_STRING)
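# Convert an agent/object location from either environment's format into an [x, z, rotation] numpy array for comparison.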
def to_xzr(loc):
if hasattr(loc, "rot"):
return np.array([loc.x, loc.z, loc.rot])
elif "position" in loc:
return np.array(
[loc["position"]["x"], loc["position"]["z"], loc["rotation"]["y"]]
)
else:
return np.array([loc["x"], loc["z"], loc["rotation"]])
for i in range(1000):
print(i)
random.seed(i)
scene = random.choice(SCENES_NAMES_SPLIT_BY_TYPE[1])
(
success,
vision_lifted_object_id,
vision_to_object_id,
) = try_setting_up_environment(vision_env, scene, "Television", "Dresser")
agent_locations = vision_env.get_agent_locations()
lifted_object = vision_env.get_object_by_id(vision_lifted_object_id, 0)
to_object = vision_env.get_object_by_id(vision_to_object_id, 0)
grid_env.reset(scene)
for j, al in enumerate(agent_locations):
grid_env.step(
{
"action": "TeleportFull",
"agentId": j,
"x": al["x"],
"z": al["z"],
"rotation": {"y": al["rotation"]},
"allowAgentIntersection": True,
}
)
assert grid_env.last_event.metadata["lastActionSuccess"]
grid_env.step(
{
"action": "CreateLiftedFurnitureAtLocation",
"objectType": "Television",
**lifted_object["position"],
"rotation": lifted_object["rotation"],
"agentId": 0,
}
)
assert grid_env.last_event.metadata["lastActionSuccess"]
grid_env.step(
{
"action": "CreateAndPlaceObjectOnFloorAtLocation",
"objectType": "Dresser",
"object_mask": dresser_mask,
**to_object["position"],
"rotation": to_object["rotation"],
"agentId": 0,
"forceAction": True,
}
)
if grid_env.last_event.metadata["lastActionSuccess"]:
for _ in range(10):
grid_env.controller.viz_world(do_wait=False)
action = random.choice(sum(egocentric_action_groups(True), tuple()))
agent_id = random.choice([0, 1])
print()
vision_object_xzr = to_xzr(
vision_env.get_object_by_id(vision_lifted_object_id, agent_id=0)
)
gridworld_object_xzr = to_xzr(grid_env.controller.lifted_object)
print("TV positions before")
print("V", vision_object_xzr.round(2))
print("G", gridworld_object_xzr.round(2))
print("Before agent locs")
print("V", [to_xzr(l) for l in vision_env.get_agent_locations()])
print("G", [to_xzr(l) for l in grid_env.get_agent_locations()])
print(action, agent_id)
vision_env.step(
{
"action": action,
"objectId": vision_lifted_object_id,
"maxAgentsDistance": 0.76,
"agentId": agent_id,
}
)
grid_env.step(
{
"action": action,
"objectId": "Television|1",
"maxAgentsDistance": 0.76,
"agentId": agent_id,
}
)
vision_object_xzr = to_xzr(
vision_env.get_object_by_id(vision_lifted_object_id, agent_id=0)
)
gridworld_object_xzr = to_xzr(grid_env.controller.lifted_object)
a = vision_env.last_event.metadata["lastActionSuccess"]
b = grid_env.last_event.metadata["lastActionSuccess"]
print("action success", a, b)
if a != b:
print("Different" + "\n" * 3)
break
if np.abs(vision_object_xzr - gridworld_object_xzr).sum() > 0.01:
print("Different tv xzr's")
print("V", vision_object_xzr)
print("G", gridworld_object_xzr)
print("\n" * 5)
break
different_locs = False
for vloc, gloc in zip(
vision_env.get_agent_locations(), grid_env.get_agent_locations()
):
vloc = to_xzr(vloc)
gloc = to_xzr(gloc)
if np.abs(vloc - gloc).sum() > 0.01:
different_locs = True
print("Different agent locs")
print(vloc)
print(gloc)
break
if different_locs:
continue
else:
print("Could not place dresser on floor")
| cordial-sync-master | rl_multi_agent/scripts/check_equality_gridworld_and_vision_world.py |
cordial-sync-master | rl_multi_agent/scripts/__init__.py |
|
"""
Script used to download and extract:
* `ai2thor_builds` - Our custom AI2-THOR build (necessary for running any experiments),
* `data` - Our evaluation data (necessary to reproduce the plots/tables reported in our paper), and
* `trained_models` - Our trained model checkpoints.
When running
```bash
python rl_multi_agent/scripts/download_evaluation_data.py all
```
this script downloads all of the above. If you'd prefer to only download a single one
of the above directories simply specify this on the command line, e.g.
```bash
python rl_multi_agent/scripts/download_evaluation_data.py ai2thor_builds
```
will only download the `ai2thor_builds` directory.
"""
import glob
import hashlib
import os
import platform
import shutil
import sys
from zipfile import ZipFile
from constants import PLATFORM, PROJECT_TOP_DIR
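# Google Drive file ids for each downloadable archive.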
DIR_TO_DOWNLOAD_ID = {
"ai2thor_builds": "1zRbOYb-K07R7Bb1vMBGWdPGqXYY-adMY",
"data": "1iz_zV74ZIdZd9UwYDvomKPRgqwpPctEi",
"trained_models": "1vLckX20fnImvoxugZd812fKi2JVKwC0Z",
}
DIR_TO_MD5 = {
"ai2thor_builds": "84201a23b87b60771ac16e7814cd28a0",
"data": "fbbad25bb7d0494fe9a818e496630522",
"trained_models": "b3d4b0208515a0fc59096e2659bb8b19",
}
DIR_TO_NAME = {
"ai2thor_builds": "AI2-THOR builds",
"data": "evaluation data",
"trained_models": "model checkpoints",
}
def get_md5_hash(file_name: str):
md5_obj = hashlib.md5()
with open(file_name, "rb") as f:
buf = f.read()
md5_obj.update(buf)
return md5_obj.hexdigest()
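# wget command template for fetching large files from Google Drive: the first request stores cookies and
# scrapes the confirmation token, the second performs the actual download. On macOS, gsed (GNU sed) is used in place of sed.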
if platform.system() == "Linux":
DOWNLOAD_TEMPLATE = """wget --quiet --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id={}' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id={}" -O {}"""
elif platform.system() == "Darwin":
DOWNLOAD_TEMPLATE = """wget --quiet --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id={}' -O- | gsed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id={}" -O {}"""
else:
raise NotImplementedError("{} is not supported".format(PLATFORM))
if __name__ == "__main__":
if len(sys.argv) == 1 or sys.argv[1] == "all":
print("Beginning process of downloading, unzipping, and moving all data.\n")
download_dirs = sorted(list(DIR_TO_DOWNLOAD_ID.keys()))
else:
download_dirs = [sys.argv[1].lower()]
assert (
download_dirs[0] in DIR_TO_DOWNLOAD_ID
), "Do not know how to download {} directory.".format(download_dirs[0])
for rel_download_dir in download_dirs:
print("Now populating {} directory.".format(rel_download_dir))
download_id = DIR_TO_DOWNLOAD_ID[rel_download_dir]
abs_download_dir = os.path.join(PROJECT_TOP_DIR, rel_download_dir)
zip_expected_md5 = DIR_TO_MD5[rel_download_dir]
os.makedirs(abs_download_dir, exist_ok=True)
tmp_directory = os.path.join("/tmp", download_id)
assert os.path.exists("/tmp")
os.makedirs(tmp_directory, exist_ok=True)
tmp_zip_path = os.path.join(tmp_directory, "to_unzip.zip")
tmp_unzip_path = os.path.join(tmp_directory, "unzipped_contents")
if (
os.path.exists(tmp_zip_path)
and get_md5_hash(tmp_zip_path) == DIR_TO_MD5[rel_download_dir]
):
print(
"{} already exists and has correct hash, skipping download.".format(
tmp_zip_path
)
)
else:
print(
"Downloading archive to temporary directory {}...".format(tmp_zip_path)
)
os.system(DOWNLOAD_TEMPLATE.format(download_id, download_id, tmp_zip_path))
print("Downloaded.")
if (not os.path.exists(tmp_zip_path)) or get_md5_hash(
tmp_zip_path
) != DIR_TO_MD5[rel_download_dir]:
print(
"Could not download contents of {}, this is likely an error. Skipping...".format(
rel_download_dir
)
)
continue
print("Unzipping to temporary file {}...".format(tmp_unzip_path))
with ZipFile(tmp_zip_path, "r") as zip_file:
zip_file.extractall(tmp_unzip_path)
print("Unzipped.".format(tmp_unzip_path))
if os.path.exists(os.path.join(tmp_unzip_path, rel_download_dir)):
tmp_unzip_path = os.path.join(tmp_unzip_path, rel_download_dir)
print("Moving unzipped contents to {}".format(abs_download_dir))
for from_path in glob.glob(os.path.join(tmp_unzip_path, "*")):
if "_MACOS" in from_path:
continue
to_path = os.path.join(abs_download_dir, os.path.basename(from_path))
if os.path.exists(to_path):
while True:
should_proceed_str = (
input(
"\nMoving {} to {} but this will cause {} to be overwritten. "
"Overwrite? (y/n)".format(from_path, to_path, to_path)
)
.strip()
.lower()
)
if should_proceed_str in ["yes", "y"]:
should_proceed = True
break
elif should_proceed_str in ["no", "n"]:
should_proceed = False
break
else:
print(
"Unrecognized input '{}', please try again.".format(
should_proceed_str
)
)
else:
should_proceed = True
if not should_proceed:
print("Not overwritting {} and continuing.".format(abs_download_dir))
continue
print("Moving extracted path {} to {}".format(from_path, to_path))
if os.path.exists(to_path):
if os.path.isdir(to_path):
shutil.rmtree(to_path)
else:
os.remove(to_path)
shutil.move(from_path, to_path)
print("Moving complete!".format(rel_download_dir))
print()
| cordial-sync-master | rl_multi_agent/scripts/download_evaluation_data.py |
"""
A script used to generate a fixed dataset of initial starting locations for agents
and the TV. This dataset can then be used to provide a fair comparison between
different models trained to complete the FurnLift task.
"""
import hashlib
import itertools
import json
import os
import random
import sys
import time
import traceback
from queue import Empty
import numpy as np
import torch
import torch.multiprocessing as mp
from setproctitle import setproctitle as ptitle
import constants
from constants import ABS_PATH_TO_LOCAL_THOR_BUILD, ABS_PATH_TO_DATA_DIR
from rl_multi_agent.furnlift_episode_samplers import FurnLiftEpisodeSamplers
from rl_multi_agent.furnlift_episodes import FurnLiftNApartStateEpisode
from rl_multi_agent.scripts.generate_furnmove_starting_locations_for_evaluation import (
CURRENTLY_ON_SERVER,
)
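# A bare-bones stand-in for the agent/env-args objects expected by the FurnLift episode
# sampler, letting us sample episode start states without constructing a full RL agent.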
class TmpObject(object):
def __init__(self):
self.env = None
self.task_data = None
self.environment = None
self.episode = None
self.docker_enabled = False
self.local_thor_build = ABS_PATH_TO_LOCAL_THOR_BUILD
self.x_display = None
def fake_episode_class(env, task_data, **kwargs):
o = TmpObject()
o.task_data = task_data
o.env = env
return o
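# Samples a single FurnLift episode in `scene` using FurnLiftEpisodeSamplers and returns the
# object id of the target object (e.g. the television) chosen for that episode.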
def try_setting_up_environment(
env,
fake_agent,
scene,
target_object_type,
num_agents=2,
min_dist_between_agents_to_pickup=8,
):
init_params = {
"scenes": [scene],
"num_agents": num_agents,
"object_type": target_object_type,
"episode_class": FurnLiftNApartStateEpisode,
"player_screen_height": 84,
"player_screen_width": 84,
"min_dist_between_agents_to_pickup": min_dist_between_agents_to_pickup,
"max_ep_using_expert_actions": 1000000,
"visible_agents": True,
"max_visible_positions_push": 0,
"min_max_visible_positions_to_consider": (1000, 1000),
}
episode_sampler = FurnLiftEpisodeSamplers(**init_params)
episode_sampler.env_args = TmpObject()
next(episode_sampler(agent=fake_agent, env=env))
target_object_id = fake_agent.episode.task_data["goal_obj_id"]
return target_object_id
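# Worker process: repeatedly pulls (scene, index) pairs from `in_queue`, deterministically
# seeds all RNGs from that pair, samples an episode, and pushes the resulting agent and
# target-object starting locations onto `out_queue`.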
def generate_data(worker_num, target_object_type, in_queue, out_queue):
ptitle("Training Agent: {}".format(worker_num))
env = None
fake_agent = TmpObject()
try:
while not in_queue.empty():
scene, index = in_queue.get(timeout=3)
seed = (
int(hashlib.md5((scene + "|{}".format(index)).encode()).hexdigest(), 16)
% 2 ** 31
)
random.seed(seed)
np.random.seed(seed)
torch.random.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
target_object_id = try_setting_up_environment(
env=env,
fake_agent=fake_agent,
scene=scene,
target_object_type=target_object_type,
)
env = fake_agent.episode.environment
target_object = env.get_object_by_id(object_id=target_object_id, agent_id=0)
object_location = {
**target_object["position"],
"rotation": (round(target_object["rotation"]["y"] / 90) % 4) * 90,
}
out_queue.put(
{
"scene": scene,
"index": index,
"agent_locations": env.get_agent_locations(),
"object_location": object_location,
}
)
except Exception as e:
print("ERROR:")
traceback.print_exc(file=sys.stdout)
raise e
finally:
if env is not None:
env.stop()
print("Process ending.")
sys.exit()
if __name__ == "__main__":
split = "test"
num_processes = 16 if CURRENTLY_ON_SERVER else 4
if split == "train":
scenes = constants.TRAIN_SCENE_NAMES[20:40]
repeats_per_scene = 50
elif split == "valid":
scenes = constants.VALID_SCENE_NAMES[5:10]
repeats_per_scene = 200
elif split == "test":
scenes = constants.TEST_SCENE_NAMES[5:10]
repeats_per_scene = 200
else:
raise NotImplementedError()
assert os.path.exists(ABS_PATH_TO_DATA_DIR)
save_path = os.path.join(
ABS_PATH_TO_DATA_DIR,
"furnlift_episode_start_positions_for_eval__{}.json".format(split),
)
start_positions_data = {}
if os.path.exists(save_path):
with open(save_path, "r") as f:
start_positions_data = json.load(f)
def save_data_to_disk():
with open(save_path, "w") as f:
json.dump(start_positions_data, f)
processes = []
try:
remaining_keys = set()
send_queue = mp.Queue()
receive_queue = mp.Queue()
k = 0
for scene, index in itertools.product(scenes, list(range(repeats_per_scene))):
if scene not in start_positions_data:
start_positions_data[scene] = {}
if str(index) in start_positions_data[scene]:
continue
remaining_keys.add((scene, index))
send_queue.put((scene, index))
k += 1
if k == 0:
print("Already generated all positions for evaluation? Quitting...")
sys.exit()
print("Starting data generation with {} unique configurations.".format(k))
for worker_num in range(num_processes):
p = mp.Process(
target=generate_data,
args=(worker_num, "Television", send_queue, receive_queue),
)
p.start()
time.sleep(0.2)
processes.append(p)
time.sleep(3)
while len(remaining_keys) != 0 and (
any([p.is_alive() for p in processes]) or not receive_queue.empty()
):
print("Gathering incoming data...")
last_time = time.time()
while time.time() - last_time < 60 and len(remaining_keys) != 0:
data = receive_queue.get(timeout=120)
scene, index = (data["scene"], data["index"])
del data["scene"], data["index"]
start_positions_data[scene][str(index)] = data
remaining_keys.remove((scene, index))
print("Saving to disk...")
save_data_to_disk()
except Empty as _:
print(
"Outqueue empty for 120 seconds. Assuming everything is done. Writing to disk..."
)
save_data_to_disk()
except Exception as e:
traceback.print_exc(file=sys.stdout)
finally:
print("Cleaning up main process.\n")
print("Joining processes.")
for p in processes:
p.join(0.1)
print("Joined.\n")
print("All done.")
sys.exit(0)
| cordial-sync-master | rl_multi_agent/scripts/generate_furnlift_starting_locations_for_evaluation.py |
"""
A script which extracts the communication information from FurnMove evaluation episodes
(using agents trained with SYNC policies and the CORDIAL loss) and saves this information
into a TSV file. This TSV file can then be further analyzed (as we have done in our paper)
to study _what_ the agents have learned to communicate with one another.
By default, this script will also save visualizations of the communication between
agents for episodes that are possibly interesting (e.g. not too short and not too long).
If you wish to turn off this behavior, change `create_communication_visualizations = True`
to `create_communication_visualizations = False` below.
"""
import glob
import json
import os
from collections import defaultdict
from typing import Optional
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from constants import ABS_PATH_TO_ANALYSIS_RESULTS_DIR, ABS_PATH_TO_DATA_DIR
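# Plots the talk (top panel) and reply (bottom panel) communication weights of both agents
# (agent 0 in red, agent 1 in green) over a single episode. Small tick marks indicate, per
# step, when the TV is visible to an agent (top) and when an agent takes a Pass or a
# MoveWithObject action (bottom).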
def visualize_talk_reply_probs(df: pd.DataFrame, save_path: Optional[str] = None):
steps = list(range(df.shape[0]))
a0_tv_visible = np.array(df["a0_tv_visible"])
a1_tv_visible = np.array(df["a1_tv_visible"])
a0_action_taken = np.array(df["a0_next_action"])
a1_action_taken = np.array(df["a1_next_action"])
a0_took_pass_action = np.logical_and(a0_action_taken == 3, a1_action_taken < 3)
a1_took_pass_action = np.logical_and(a1_action_taken == 3, a0_action_taken < 3)
a0_took_mwo_action = np.logical_and(4 <= a0_action_taken, a0_action_taken <= 7)
a1_took_mwo_action = np.logical_and(4 <= a1_action_taken, a1_action_taken <= 7)
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(3, 3))
fig.subplots_adjust(hspace=0.1)
# Plot 1
axs[0].set_ylim((-0.15, 1.15))
axs[0].set_ylabel("Talk Weight")
axs[0].plot(steps, df["a00_talk_probs"], color="r", linewidth=0.75)
axs[0].plot(steps, df["a10_talk_probs"], color="g", linewidth=0.75)
a0_tv_visible_inds = np.argwhere(a0_tv_visible)
if len(a0_tv_visible_inds) != 0:
axs[0].scatter(
a0_tv_visible_inds,
[-0.05] * len(a0_tv_visible_inds),
color="r",
s=4,
marker="|",
)
a1_tv_visible_inds = np.argwhere(a1_tv_visible)
if len(a1_tv_visible_inds) != 0:
axs[0].scatter(
a1_tv_visible_inds,
[-0.1] * len(a1_tv_visible_inds),
color="green",
s=4,
marker="|",
)
# Plot 2
axs[1].set_ylim((-0.15, 1.15))
axs[1].set_ylabel("Reply Weight")
axs[1].set_xlabel("Steps in Episode")
axs[1].plot(steps, df["a00_reply_probs"], color="r", linewidth=0.75)
axs[1].plot(steps, df["a10_reply_probs"], color="g", linewidth=0.75)
a0_pass_action_steps = np.argwhere(a0_took_pass_action)
if len(a0_pass_action_steps) != 0:
axs[1].scatter(
a0_pass_action_steps,
[1.1] * len(a0_pass_action_steps),
color="r",
s=4,
marker="|",
)
a1_pass_action_steps = np.argwhere(a1_took_pass_action)
if len(a1_pass_action_steps) != 0:
axs[1].scatter(
a1_pass_action_steps,
[1.05] * len(a1_pass_action_steps),
color="g",
s=4,
marker="|",
)
a0_mwo_action = np.argwhere(a0_took_mwo_action)
if len(a0_mwo_action) != 0:
axs[1].scatter(
a0_mwo_action, [-0.05] * len(a0_mwo_action), color="r", s=4, marker="|",
)
a1_mwo_action = np.argwhere(a1_took_mwo_action)
if len(a1_mwo_action) != 0:
axs[1].scatter(
a1_mwo_action, [-0.1] * len(a1_mwo_action), color="g", s=4, marker="|",
)
if save_path is not None:
fig.savefig(save_path, bbox_inches="tight")
plt.close(fig)
else:
plt.show()
if __name__ == "__main__":
create_communication_visualizations = True
eval_results_dir = os.path.join(
ABS_PATH_TO_DATA_DIR, "furnmove_evaluations__test/vision_mixture_cl_rot"
)
eval_id = eval_results_dir.split("__")[-2]
print()
print(eval_id)
recorded = defaultdict(lambda: [])
for i, p in enumerate(sorted(glob.glob(os.path.join(eval_results_dir, "*.json")))):
if i % 100 == 0:
print(i)
with open(p, "r") as f:
result_dict = json.load(f)
recorded["a00_talk_probs"].extend(
[probs[0] for probs in result_dict["a0_talk_probs"]]
)
recorded["a10_talk_probs"].extend(
[probs[0] for probs in result_dict["a1_talk_probs"]]
)
recorded["a00_reply_probs"].extend(
[probs[0] for probs in result_dict["a0_reply_probs"]]
)
recorded["a10_reply_probs"].extend(
[probs[0] for probs in result_dict["a1_reply_probs"]]
)
ar0, ar1 = result_dict["agent_action_info"]
for j, srs in enumerate(result_dict["step_results"]):
sr0, sr1 = srs
before_loc0 = sr0["before_location"]
before_loc1 = sr1["before_location"]
recorded["from_a0_to_a1"].append(
round((before_loc0["rotation"] - before_loc1["rotation"]) / 90) % 4
)
recorded["from_a1_to_a0"].append((-recorded["from_a0_to_a1"][-1]) % 4)
recorded["a0_next_action"].append(sr0["action"])
recorded["a1_next_action"].append(sr1["action"])
recorded["a0_action_success"].append(1 * sr0["action_success"])
recorded["a1_action_success"].append(1 * sr1["action_success"])
if j == 0:
recorded["a0_last_action_success"].append(1)
recorded["a1_last_action_success"].append(1)
else:
old0, old1 = result_dict["step_results"][j - 1]
recorded["a0_last_action_success"].append(1 * old0["action_success"])
recorded["a1_last_action_success"].append(1 * old1["action_success"])
e0 = sr0["extra_before_info"]
e1 = sr1["extra_before_info"]
recorded["a0_tv_visible"].append(1 * e0["tv_visible"])
recorded["a1_tv_visible"].append(1 * e1["tv_visible"])
recorded["a0_dresser_visible"].append(1 * e0["dresser_visible"])
recorded["a1_dresser_visible"].append(1 * e1["dresser_visible"])
recorded["index"].extend([i] * len(result_dict["a0_talk_probs"]))
recorded = dict(recorded)
df = pd.DataFrame(recorded)
df.to_csv(
os.path.join(
ABS_PATH_TO_DATA_DIR,
"furnmove_communication_analysis/furnmove_talk_reply_dataset.tsv",
),
sep="\t",
)
if create_communication_visualizations:
print("Creating communication visualizations")
max_episode_index = int(df["index"].max())
for k in range(max_episode_index + 1):
print(k)
subdf = df.query("index == {}".format(k))
if 60 < subdf.shape[0] < 80 and subdf.shape[0] != 250:
visualize_talk_reply_probs(
subdf,
save_path=os.path.join(
ABS_PATH_TO_ANALYSIS_RESULTS_DIR,
"plots/furnmove_communication/{}.pdf",
).format(k),
)
| cordial-sync-master | rl_multi_agent/scripts/generate_furnmove_communication_dataset.py |
"""
This script can be used to evaluate the model checkpoints saved in
* `PATH/TO/THIS/PROJECT/trained_models/final_furnlift_ckpts` or
* `PATH/TO/THIS/PROJECT/trained_models/final_furnmove_ckpts`
using the fixed evaluation datasets saved in
* `PATH/TO/THIS/PROJECT/data/furnlift_episode_start_positions_for_eval__test.json`,
* `PATH/TO/THIS/PROJECT/data/furnmove_episode_start_positions_for_eval__3agents__test.json`,
* `PATH/TO/THIS/PROJECT/data/furnmove_episode_start_positions_for_eval__test.json`, or
* `PATH/TO/THIS/PROJECT/data/furnmove_episode_start_positions_for_eval__train.json`.
The above checkpoints can either be trained and created by you or downloaded by following
the directions in the README.md. Likewise, you can download the above json files by following
the directions in the README.md or generate new starting evaluation datasets by running
`generate_furnlift_starting_locations_for_evaluation.py` and
`generate_furnmove_starting_locations_for_evaluation.py` appropriately. Note, due to randomness
in generating these evaluation datasets, we DO NOT GUARANTEE that running the above scripts
will result in the same `*.json` that we used in our experiments. If you wish to directly
compare to our results, please download the above json files. To run this script, run
```
python rl_multi_agent/scripts/run_furnmove_or_furnlift_evaluations.py furnmove
```
where you will want to change `furnmove` to `furnlift` if you wish to run the FurnLift evaluations.
"""
import glob
import os
import subprocess
import sys
import time
import torch
from constants import ABS_PATH_TO_LOCAL_THOR_BUILD, PROJECT_TOP_DIR
from utils.misc_util import NonBlockingStreamReader
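# Runs a single evaluation command as a `pipenv run python main.py ...` subprocess (with the
# local THOR build and x_display appended), streaming its stdout and returning once the child
# prints "All done." or exits.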
def run(args):
thor_build = ABS_PATH_TO_LOCAL_THOR_BUILD
thor_build = os.path.abspath(thor_build)
split_command = (
["pipenv", "run", "python"]
+ args
+ ["--local_thor_build", thor_build, "--x_display", "0.0"]
)
print("Command:\n" + " ".join(split_command))
p = subprocess.Popen(
split_command,
encoding="utf-8",
env={**os.environ, "PYTHONPATH": os.getcwd()},
stdout=subprocess.PIPE,
)
nbsr = NonBlockingStreamReader(p.stdout)
last_time = time.time()
while p.poll() is None:
l = nbsr.readline(1)
if l:
print(l.rstrip(), flush=True)
if "All done." in l:
p.kill()
return
time.sleep(0.02)
if time.time() - last_time > 60:
last_time = time.time()
print("PID of process currently running: {}".format(p.pid), flush=True)
while True:
l = nbsr.readline(1)
if l:
print(l.rstrip(), flush=True)
else:
break
print(p.stdout.read())
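# Template of command line arguments passed to main.py for each evaluation run; backprop is
# disabled and the fixed episode-initialization queue is used, so checkpoints are only
# evaluated, never updated.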
eval_args_string = (
"main.py"
" --experiment_dir {eval_experiments_dir}"
" --task {task}"
" --num_steps 50"
" --val_timeout 0"
" --enable_val_agent f"
" --enable_logging f"
" --save_freq 100000"
" --seed 1"
" --gpu_ids {gpu_ids_string}"
" --amsgrad t"
" --use_episode_init_queue t"
" --save_extra_data t"
" --skip_backprop t"
" --workers {workers}"
" --verbose t"
)
if __name__ == "__main__":
if len(sys.argv) == 1:
eval_experiments_dir = "rl_multi_agent/furnmove_eval_experiments"
else:
eval_experiments_dir = "rl_multi_agent/{}_eval_experiments".format(
sys.argv[1].strip().lower()
)
os.chdir(PROJECT_TOP_DIR)
gpu_ids_string = (
"-1"
if not torch.cuda.is_available()
else " ".join([str(i) for i in range(1, 8)])
)
workers = 4 if not torch.cuda.is_available() else 40
results = []
for config_path in sorted(
glob.glob(os.path.join(eval_experiments_dir, "*_config.py"))
):
task = os.path.basename(config_path).replace("_config.py", "")
try:
run(
eval_args_string.format(
eval_experiments_dir=eval_experiments_dir,
task=task,
gpu_ids_string=gpu_ids_string,
workers=workers,
).split(" ")
)
results.append("{} likely success".format(task))
except subprocess.CalledProcessError as cpe:
results.append("{} failure".format(task))
print("Summary:")
print("\n".join(results))
| cordial-sync-master | rl_multi_agent/scripts/run_furnmove_or_furnlift_evaluations.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_marginal_cl_rot_config import (
FurnMoveGridExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveGridExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_marginal_cl_rot_1000000_2019-11-12_07-58-51.dat",
)
def simple_name(self):
return "grid_marginal_cl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_marginal_cl_rot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_vision_central_3agents_config import (
FurnMoveCentralVision3AgentsExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveCentralVision3AgentsExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_vision_central_3agents_300000_2020-03-08_21-38-23.dat",
)
def simple_name(self):
return "vision_central_3agents"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__3agents__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_vision_central_3agents_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_vision_mixture2mix_nocl_rot_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_vision_mixture2mix_nocl_rot_500000_2019-11-19_13-42-40.dat",
)
def simple_name(self):
return "vision_mixture2mix_nocl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_vision_mixture2mix_nocl_rot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_marginalnocomm_nocl_norot_config import (
FurnMoveNoCommNoRotationsExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveNoCommNoRotationsExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_marginalnocomm_nocl_norot_1000000_2020-02-22_05-54-13.dat",
)
def simple_name(self):
return "grid_marginalnocomm_nocl_norot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_marginalnocomm_nocl_norot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_vision_mixture_3agents_config import (
FurnMoveMixtureVision3AgentsExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveMixtureVision3AgentsExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_vision_mixture_3agents_300000_2020-03-08_14-47-04.dat",
)
def simple_name(self):
return "vision_mixture_3agents"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__3agents__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_vision_mixture_3agents_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_marginalnocomm_nocl_rot_config import (
FurnMoveGridExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveGridExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_marginalnocomm_nocl_rot_1000000_2020-02-22_00-40-34.dat",
)
def simple_name(self):
return "grid_marginalnocomm_nocl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_marginalnocomm_nocl_rot_config.py |
cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/constants.py |
|
cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/__init__.py |
|
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_bigcentralmixture_cl_rot_config import (
FurnMoveGridExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveGridExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_bigcentralmixture_cl_rot_1000000_2019-11-20_07-08-15.dat",
)
def simple_name(self):
return "grid_bigcentralmixture_cl_rot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_bigcentralmixture_cl_rot_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_marginal_3agents_config import (
FurnMove3AgentUncoordinatedExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMove3AgentUncoordinatedExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_marginal_3agents_1000000_2020-02-28_00-24-58.dat",
)
def simple_name(self):
return "grid_marginal_3agents"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__3agents__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_marginal_3agents_config.py |
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_mixture_cl_norot_config import (
FurnMoveMixtureNoRotationsExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMoveMixtureNoRotationsExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_mixture_cl_norot_1000000_2019-11-12_16-24-15.dat",
)
def simple_name(self):
return "grid_mixture_cl_norot"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| cordial-sync-master | rl_multi_agent/furnmove_eval_experiments/furnmove_grid_mixture_cl_norot_config.py |