diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index e7828f0a7..cb2a22444 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -24,7 +24,7 @@ If applicable, add screenshots to help explain your problem.
 **Desktop (please complete the following information):**
  - OS: [e.g. mac/linux/windows]
- - Version [e.g. 1.2.2]
+ - Version [e.g. 1.2.3]
 
 **Additional context**
 Add any other context about the problem here.
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0b33a2b1d..ffd2d63bc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.10.0)
-project(Griddly VERSION 1.2.2)
+project(Griddly VERSION 1.2.3)
 
 set(BINARY ${CMAKE_PROJECT_NAME})
diff --git a/bindings/python.cpp b/bindings/python.cpp
index 35bdb9a1d..dbbbf14bf 100644
--- a/bindings/python.cpp
+++ b/bindings/python.cpp
@@ -12,7 +12,7 @@ namespace griddly {
 PYBIND11_MODULE(python_griddly, m) {
   m.doc() = "Griddly python bindings";
-  m.attr("version") = "1.2.2";
+  m.attr("version") = "1.2.3";
 
 #ifndef NDEBUG
   spdlog::set_level(spdlog::level::debug);
diff --git a/docs/conf.py b/docs/conf.py
index 3aea132a3..64177ea68 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -22,7 +22,7 @@ author = 'Chris Bamford'
 
 # The full version, including alpha/beta/rc tags
-release = '1.2.2'
+release = '1.2.3'
 
 # -- General configuration ---------------------------------------------------
diff --git a/python/examples/rllib/rllib_multi_agent.py b/python/examples/rllib/rllib_multi_agent.py
index 031da44ce..8e0b85d80 100644
--- a/python/examples/rllib/rllib_multi_agent.py
+++ b/python/examples/rllib/rllib_multi_agent.py
@@ -2,7 +2,9 @@
 import sys
 
 import ray
-from griddly.util.rllib.callbacks import VideoCallback
+
+from examples.rllib.vector_agent import VectorAgent
+from griddly.util.rllib.callbacks import VideoCallbacks
 from ray import tune
 from ray.rllib.agents.impala import ImpalaTrainer
 from ray.rllib.models import ModelCatalog
@@ -27,7 +29,7 @@ def _create_env(env_config):
 
     register_env(env_name, _create_env)
 
-    ModelCatalog.register_custom_model('SimpleConv', SimpleConvAgent)
+    ModelCatalog.register_custom_model('SimpleConv', VectorAgent)
 
     max_training_steps = 50000000
 
@@ -36,7 +38,7 @@ def _create_env(env_config):
             'num_workers': 3,
             'num_envs_per_worker': 1,
 
-            'callbacks': VideoCallback,
+            'callbacks': VideoCallbacks,
 
             'model': {
                 'custom_model': 'SimpleConv',
@@ -48,15 +50,16 @@ def _create_env(env_config):
                 # know if that player is no longer active
                 # 'player_done_variable': 'player_done',
 
-                'record_video_config': {
-                    'frequency': 20000,  # number of rollouts
-                    'directory': 'videos'
-                },
+                # 'record_video_config': {
+                #     'frequency': 20000,  # number of rollouts
+                #     'directory': 'videos'
+                # },
 
-                'random_level_on_reset': True,
-                'yaml_file': 'Multi-Agent/foragers.yaml',
-                'global_observer_type': gd.ObserverType.SPRITE_2D,
-                'max_steps': 500,
+                'random_level_on_reset': False,
+                'yaml_file': 'Multi-Agent/robot_tag_12.yaml',
+                'global_observer_type': gd.ObserverType.VECTOR,
+                'player_observer_type': gd.ObserverType.VECTOR,
+                #'max_steps': 500,
             },
 
             'entropy_coeff_schedule': [
                 [0, 0.01],
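Note that Griddly's callback classes are now plural (`VideoCallbacks`, and below `ActionTrackerCallbacks`), matching RLlib's `DefaultCallbacks` naming, and composition moves from the removed Griddly `MultiCallback` to RLlib's own `MultiCallbacks`. A minimal sketch of wiring the renamed classes into a tune run; the environment name and the rest of the config are illustrative, not taken from this repo:

```python
import ray
from ray import tune
from ray.rllib.agents import MultiCallbacks

from griddly.util.rllib.callbacks import ActionTrackerCallbacks, VideoCallbacks

ray.init()

# 'GDY-SomeEnv' stands in for any environment registered via register_env.
tune.run(
    'IMPALA',
    config={
        'framework': 'torch',
        'env': 'GDY-SomeEnv',
        # MultiCallbacks composes several DefaultCallbacks subclasses so that
        # each one receives the same episode events.
        'callbacks': MultiCallbacks([
            ActionTrackerCallbacks,
            VideoCallbacks,
        ]),
    },
)
```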
diff --git a/python/examples/rllib/rllib_rts.py b/python/examples/rllib/rllib_rts.py
index fe96e0a3b..a419a5701 100644
--- a/python/examples/rllib/rllib_rts.py
+++ b/python/examples/rllib/rllib_rts.py
@@ -3,11 +3,12 @@
 import ray
 from ray import tune
+from ray.rllib.agents import MultiCallbacks
 from ray.rllib.models import ModelCatalog
 from ray.tune.registry import register_env
 
 from griddly import gd
-from griddly.util.rllib.callbacks import ActionTrackerCallback, MultiCallback, VideoCallback
+from griddly.util.rllib.callbacks import ActionTrackerCallbacks, VideoCallbacks
 from griddly.util.rllib.environment.core import RLlibMultiAgentWrapper, RLlibEnv
 from griddly.util.rllib.torch.agents.impala_cnn import ImpalaCNNAgent
 from griddly.util.rllib.torch.conditional_actions.conditional_action_policy_trainer import \
     ConditionalActionImpalaTrainer
@@ -38,9 +39,9 @@ def _create_env(env_config):
             'num_workers': 3,
             'num_envs_per_worker': 2,
 
-            'callbacks': MultiCallback([
-                ActionTrackerCallback,
-                VideoCallback
+            'callbacks': MultiCallbacks([
+                ActionTrackerCallbacks,
+                VideoCallbacks
             ]),
 
             'model': {
diff --git a/python/examples/rllib/rllib_single_agent.py b/python/examples/rllib/rllib_single_agent.py
index aa80a90df..10121d2f9 100644
--- a/python/examples/rllib/rllib_single_agent.py
+++ b/python/examples/rllib/rllib_single_agent.py
@@ -2,7 +2,7 @@
 import sys
 
 import ray
-from griddly.util.rllib.callbacks import VideoCallback
+from griddly.util.rllib.callbacks import VideoCallbacks
 from ray import tune
 from ray.rllib.agents.impala import ImpalaTrainer
 from ray.rllib.models import ModelCatalog
@@ -30,7 +30,7 @@
             'num_workers': 8,
             'num_envs_per_worker': 4,
 
-            'callbacks': VideoCallback,
+            'callbacks': VideoCallbacks,
 
             'model': {
                 'custom_model': 'GAP',
diff --git a/python/griddly/GymWrapper.py b/python/griddly/GymWrapper.py
index 57d7a6800..0f37d6496 100644
--- a/python/griddly/GymWrapper.py
+++ b/python/griddly/GymWrapper.py
@@ -113,29 +113,33 @@ def step(self, action):
 
         elif len(action) == self.player_count:
 
-            if np.ndim(action) == 1 or np.ndim(action) == 3:
-                if isinstance(action[0], list) or isinstance(action[0], np.ndarray) or isinstance(action[0], tuple):
-                    # Multiple agents that can perform multiple actions in parallel
-                    # Used in RTS games
-                    reward = []
-                    for p in range(self.player_count):
-                        player_action = np.array(action[p], dtype=np.int)
-                        final = p == self.player_count - 1
-                        rew, done, info = self._players[p].step_multi(player_action, final)
-                        reward.append(rew)
+            processed_actions = []
+            multi_action = False
+            for a in action:
+                if a is None:
+                    processed_action = np.zeros((len(self.action_space_parts)), dtype=np.int32)
                 else:
-                    action = np.array(action, dtype=np.int32)
-                    action_data = action.reshape(-1, 1)
-                    reward, done, info = self.game.step_parallel(action_data)
+                    processed_action = np.array(a, dtype=np.int32)
+                if len(processed_action.shape) > 1 and processed_action.shape[0] > 1:
+                    multi_action = True
+                processed_actions.append(processed_action)
+
+            if not self.has_avatar and multi_action:
+                # Multiple agents that can perform multiple actions in parallel
+                # Used in RTS games
+                reward = []
+                for p in range(self.player_count):
+                    player_action = processed_actions[p].reshape(-1, len(self.action_space_parts))
+                    final = p == self.player_count - 1
+                    rew, done, info = self._players[p].step_multi(player_action, final)
+                    reward.append(rew)
 
             # Multiple agents executing actions in parallel
             # Used in multi-agent environments
-            elif np.ndim(action) == 2:
-                action_data = np.array(action, dtype=np.int32)
-                reward, done, info = self.game.step_parallel(action_data)
             else:
-                raise ValueError(f'The supplied action is in the wrong format for this environment.\n\n'
-                                 f'A valid example: {self.action_space.sample()}')
+                action_data = np.array(processed_actions, dtype=np.int32)
+                action_data = action_data.reshape(self.player_count, -1)
+                reward, done, info = self.game.step_parallel(action_data)
 
         else:
             raise ValueError(f'The supplied action is in the wrong format for this environment.\n\n'
@@ -276,13 +280,13 @@ def _create_action_space(self):
         self.action_count = len(self.action_names)
         self.default_action_name = self.action_names[0]
 
-        action_space_parts = []
+        self.action_space_parts = []
 
         if not self.has_avatar:
-            action_space_parts.extend([self.grid_width, self.grid_height])
+            self.action_space_parts.extend([self.grid_width, self.grid_height])
 
         if self.action_count > 1:
-            action_space_parts.append(self.action_count)
+            self.action_space_parts.append(self.action_count)
 
         self.max_action_ids = 0
         for action_name, mapping in sorted(self.action_input_mappings.items()):
@@ -292,12 +296,12 @@ def _create_action_space(self):
             if self.max_action_ids < num_action_ids:
                 self.max_action_ids = num_action_ids
 
-        action_space_parts.append(self.max_action_ids)
+        self.action_space_parts.append(self.max_action_ids)
 
-        if len(action_space_parts) == 1:
+        if len(self.action_space_parts) == 1:
             action_space = gym.spaces.Discrete(self.max_action_ids)
         else:
-            action_space = gym.spaces.MultiDiscrete(action_space_parts)
+            action_space = gym.spaces.MultiDiscrete(self.action_space_parts)
 
         if self.player_count > 1:
             action_space = MultiAgentActionSpace([action_space for _ in range(self.player_count)])
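The rewritten `step()` replaces the `np.ndim`-based dispatch with explicit per-player preprocessing: a `None` action from a dead or inactive player is padded to an all-zero no-op vector sized by `self.action_space_parts`, and a player whose action array has more than one row triggers the RTS `step_multi` path. A hedged usage sketch of the `None` convention; the YAML file and observer types mirror the multi-agent example above, and any registered multi-agent GDY file would do:

```python
import gym
from griddly import GymWrapperFactory, gd

wrapper = GymWrapperFactory()
wrapper.build_gym_from_yaml('RobotTag', 'Multi-Agent/robot_tag_12.yaml',
                            player_observer_type=gd.ObserverType.VECTOR,
                            global_observer_type=gd.ObserverType.VECTOR)

env = gym.make('GDY-RobotTag-v0')
env.reset()

# One action per player; a player that is out of the game passes None,
# which the wrapper converts into an all-zero no-op action vector.
actions = [env.action_space[p].sample() for p in range(env.player_count)]
actions[-1] = None  # e.g. the last player has been tagged out

obs, reward, done, info = env.step(actions)
```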
diff --git a/python/setup.py b/python/setup.py
index 5687b71cf..1d5a6060e 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -71,7 +71,7 @@ def griddly_package_data(config='Debug'):
 
 setup(
     name='griddly',
-    version="1.2.2",
+    version="1.2.3",
     author_email="chrisbam4d@gmail.com",
     description="Griddly Python Libraries",
     long_description=long_description,
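Promoting `action_space_parts` to an instance attribute is what lets `step()` size no-op vectors and reshape multi-action blocks. How the parts compose is easiest to see with concrete numbers; a self-contained sketch using the same hypothetical 5x6 grid, two action types, and five action ids that the tests below assert against:

```python
import gym
import numpy as np

# Hypothetical dimensions matching the shapes asserted in the tests below.
grid_width, grid_height = 5, 6
action_count = 2     # number of action types
max_action_ids = 5   # largest number of action ids across the types

# With no controllable avatar an action must also select its source cell,
# so the flat space is [x, y, action_type, action_id].
action_space_parts = [grid_width, grid_height, action_count, max_action_ids]
action_space = gym.spaces.MultiDiscrete(action_space_parts)

assert action_space.shape == (4,)
assert np.all(action_space.nvec == [5, 6, 2, 5])

# step() also uses the parts to reshape a player's multi-action block into
# one row per action, and to build the no-op vector that stands in for None.
multi_action = np.array([[1, 3, 0, 1], [3, 4, 1, 3]], dtype=np.int32)
assert multi_action.reshape(-1, len(action_space_parts)).shape == (2, 4)

no_op = np.zeros(len(action_space_parts), dtype=np.int32)
```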
diff --git a/python/tests/step_test.py b/python/tests/step_test.py
index a170c8358..486ffa719 100644
--- a/python/tests/step_test.py
+++ b/python/tests/step_test.py
@@ -708,3 +708,378 @@ def test_step_MultiplePlayer_SelectSource_MultipleActionType_MultipleAction(test
     assert len(sample) == 2
     assert sample[0].shape == (4,)
     assert sample[1].shape == (4,)
+
+def test_step_MultiplePlayer_SingleActionType_SingleValue_Agent_DONE(test_name):
+    """
+    There is an avatar
+    Multiple players
+
+    env.step([
+        actionId_player1,
+        None
+    ])
+    """
+    env = build_test_env(
+        test_name,
+        "tests/gdy/test_step_MultiPlayer_SingleActionType.yaml"
+    )
+
+    assert len(env.observation_space) == 2
+    assert len(env.action_space) == 2
+
+    assert env.global_observation_space.shape == (1, 5, 6)
+    assert env.game.get_object_names() == ['avatar']
+
+    for p in range(env.player_count):
+        assert env.observation_space[p].shape == (1, 5, 6)
+        assert env.action_space[p].shape == ()
+        assert env.action_space[p].n == 5
+
+    obs, reward, done, info = env.step([
+        1,
+        None,
+    ])
+
+    assert obs[0].shape == (1, 5, 6)
+    assert reward[0] == 0
+    assert obs[1].shape == (1, 5, 6)
+    assert reward[1] == 0
+
+    player1_avatar_state = get_object_state(env, 'avatar', player=1)
+    player2_avatar_state = get_object_state(env, 'avatar', player=2)
+
+    assert player1_avatar_state['Location'] == [0, 3]
+    assert player2_avatar_state['Location'] == [3, 3]
+
+    sample = env.action_space.sample()
+    assert len(sample) == 2
+
+
+def test_step_MultiplePlayer_SingleActionType_ArrayValue_Agent_DONE(test_name):
+    """
+    There is an avatar, multiple players
+
+    env.step([
+        [actionId1],
+        None
+    ])
+    """
+
+    env = build_test_env(
+        test_name,
+        "tests/gdy/test_step_MultiPlayer_SingleActionType.yaml"
+    )
+
+    assert len(env.observation_space) == 2
+    assert len(env.action_space) == 2
+
+    assert env.global_observation_space.shape == (1, 5, 6)
+    assert env.game.get_object_names() == ['avatar']
+
+    for p in range(env.player_count):
+        assert env.observation_space[p].shape == (1, 5, 6)
+        assert env.action_space[p].shape == ()
+        assert env.action_space[p].n == 5
+
+    obs, reward, done, info = env.step([
+        [1],
+        None,
+    ])
+
+    assert obs[0].shape == (1, 5, 6)
+    assert reward[0] == 0
+    assert obs[1].shape == (1, 5, 6)
+    assert reward[1] == 0
+    assert not done
+    assert info == {}
+
+    player1_avatar_state = get_object_state(env, 'avatar', player=1)
+    player2_avatar_state = get_object_state(env, 'avatar', player=2)
+
+    assert player1_avatar_state['Location'] == [0, 3]
+    assert player2_avatar_state['Location'] == [3, 3]
+
+
+def test_step_MultiplePlayer_MultipleActionType_Agent_DONE(test_name):
+    """
+    There is an avatar
+    Multiple players
+
+    env.step([
+        [action_type, actionId_player1],
+        None
+    ])
+    """
+
+    env = build_test_env(
+        test_name,
+        "tests/gdy/test_step_MultiPlayer_MultipleActionType.yaml"
+    )
+
+    assert len(env.observation_space) == 2
+    assert len(env.action_space) == 2
+
+    assert env.global_observation_space.shape == (1, 5, 6)
+    assert env.game.get_object_names() == ['avatar']
+
+    for p in range(env.player_count):
+        assert env.observation_space[p].shape == (1, 5, 6)
+        assert env.action_space[p].shape == (2,)
+        assert np.all(env.action_space[p].nvec == [2, 5])
+
+    obs, reward, done, info = env.step([
+        [0, 1],
+        None,
+    ])
+
+    assert obs[0].shape == (1, 5, 6)
+    assert reward[0] == 0
+    assert obs[1].shape == (1, 5, 6)
+    assert reward[1] == 0
+    assert not done
+    assert info == {}
+
+    player1_avatar_state = get_object_state(env, 'avatar', player=1)
+    player2_avatar_state = get_object_state(env, 'avatar', player=2)
+
+    assert player1_avatar_state['Location'] == [0, 3]
+    assert player2_avatar_state['Location'] == [3, 3]
+
+def test_step_MultiplePlayer_SelectSource_SingleActionType_Agent_DONE(test_name):
+    """
+    There is no avatar, multiple players, single action type
+
+    env.step([
+        [x1, y1, actionId1],
+        None
+    ])
+    """
+    env = build_test_env(
+        test_name,
+        "tests/gdy/test_step_MultiPlayer_SelectSource_SingleActionType.yaml"
+    )
+
+    assert len(env.observation_space) == 2
+    assert len(env.action_space) == 2
+
+    assert env.global_observation_space.shape == (1, 5, 6)
+    assert env.game.get_object_names() == ['avatar']
+
+    for p in range(env.player_count):
+        assert env.observation_space[p].shape == (1, 5, 6)
+        assert env.action_space[p].shape == (3,)
+        assert np.all(env.action_space[p].nvec == [5, 6, 5])
+
+    obs, reward, done, info = env.step([
+        [1, 3, 1],
+        None,
+    ])
+
+    assert obs[0].shape == (1, 5, 6)
+    assert reward[0] == 0
+    assert obs[1].shape == (1, 5, 6)
+    assert reward[1] == 0
+    assert not done
+    assert info == {}
+
+    player1_avatar_state = get_object_state(env, 'avatar', player=1)
+    player2_avatar_state = get_object_state(env, 'avatar', player=2)
+
+    assert player1_avatar_state['Location'] == [0, 3]
+    assert player2_avatar_state['Location'] == [3, 3]
+
+def test_step_MultiplePlayer_SelectSource_MultipleActionType_Agent_DONE(test_name):
+    """
+    There is no avatar, multiple players
+
+    env.step([
+        [x1, y1, action_type, actionId1],
+        None
+    ])
+    """
+    env = build_test_env(
+        test_name,
+        "tests/gdy/test_step_MultiPlayer_SelectSource_MultipleActionType.yaml"
+    )
+
+    assert len(env.observation_space) == 2
+    assert len(env.action_space) == 2
+
+    assert env.global_observation_space.shape == (1, 5, 6)
+    assert env.game.get_object_names() == ['avatar']
+
+    for p in range(env.player_count):
+        assert env.observation_space[p].shape == (1, 5, 6)
+        assert env.action_space[p].shape == (4,)
+        assert np.all(env.action_space[p].nvec == [5, 6, 2, 5])
+
+    obs, reward, done, info = env.step([
+        [1, 3, 0, 1],
+        None,
+    ])
+
+    assert obs[0].shape == (1, 5, 6)
+    assert reward[0] == 0
+    assert obs[1].shape == (1, 5, 6)
+    assert reward[1] == 0
+    assert not done
+    assert info == {}
+
+    player1_avatar_state = get_object_state(env, 'avatar', player=1)
+    player2_avatar_state = get_object_state(env, 'avatar', player=2)
+
+    assert player1_avatar_state['Location'] == [0, 3]
+    assert player2_avatar_state['Location'] == [3, 3]
+
+    sample = env.action_space.sample()
+    assert len(sample) == 2
+    assert sample[0].shape == (4,)
+    assert sample[1].shape == (4,)
+
+
+def test_step_MultiplePlayer_SelectSource_SingleActionType_MultipleAction_Agent_DONE(test_name):
+    """
+    There is no avatar, multiple players
+
+    env.step([
+        [  # player 1 performs multiple actions this step
+            [x1, y1, actionId1],
+            [x2, y2, actionId2]
+        ],
+        None,  # player 2 is dead
+    ])
+    """
+    env = build_test_env(
+        test_name,
+        "tests/gdy/test_step_MultiPlayer_SelectSource_SingleActionType_MultipleAction.yaml"
+    )
+
+    assert len(env.observation_space) == 2
+    assert len(env.action_space) == 2
+
+    assert env.global_observation_space.shape == (2, 5, 6)
+
+    for p in range(env.player_count):
+        assert env.observation_space[p].shape == (2, 5, 6)
+        assert env.action_space[p].shape == (3,)
+        assert np.all(env.action_space[p].nvec == [5, 6, 5])
+
+    obs, reward, done, info = env.step([
+        [
+            [1, 3, 1],
+            [3, 4, 3],
+        ],
+        None,
+    ])
+
+    assert obs[0].shape == (2, 5, 6)
+    assert reward[0] == 0
+    assert obs[1].shape == (2, 5, 6)
+    assert reward[1] == 0
+    assert not done
+    assert info == {}
+
+    player1_avatar1_state = get_object_state(env, 'avatar1', player=1)
+    player1_avatar2_state = get_object_state(env, 'avatar2', player=1)
+
+    assert player1_avatar1_state['Location'] == [0, 3]
+    assert player1_avatar2_state['Location'] == [4, 4]
+
+    object_names = env.game.get_object_names()
+    avatar1_id = object_names.index('avatar1')
+    avatar2_id = object_names.index('avatar2')
+
+    assert obs[0][avatar1_id, 0, 3] == 1
+    assert obs[0][avatar2_id, 4, 4] == 1
+
+    player2_avatar1_state = get_object_state(env, 'avatar1', player=2)
+    player2_avatar2_state = get_object_state(env, 'avatar2', player=2)
+
+    assert player2_avatar1_state['Location'] == [3, 3]
+    assert player2_avatar2_state['Location'] == [1, 4]
+
+    assert obs[0][avatar1_id, 3, 3] == 1
+    assert obs[0][avatar2_id, 1, 4] == 1
+
+    sample = env.action_space.sample()
+    assert len(sample) == 2
+    assert sample[0].shape == (3,)
+    assert sample[1].shape == (3,)
+
+
+def test_step_MultiplePlayer_SelectSource_MultipleActionType_MultipleAction_Agent_DONE(test_name):
+    """
+    There is no avatar, multiple players
+
+    env.step([
+        [  # player 1 performs multiple actions this step
+            [x1, y1, action_type, actionId1],
+            [x2, y2, action_type, actionId2]
+        ],
+        None,  # player 2 is dead
+    ])
+    """
+    env = build_test_env(
+        test_name,
+        "tests/gdy/test_step_MultiPlayer_SelectSource_MultipleActionType_MultipleAction.yaml"
+    )
+
+    assert len(env.observation_space) == 2
+    assert len(env.action_space) == 2
+
+    assert env.global_observation_space.shape == (2, 5, 6)
+
+    for p in range(env.player_count):
+        assert env.observation_space[p].shape == (2, 5, 6)
+        assert env.action_space[p].shape == (4,)
+        assert np.all(env.action_space[p].nvec == [5, 6, 2, 5])
+
+    obs, reward, done, info = env.step([
+        [
+            [1, 3, 0, 1],
+            [3, 4, 1, 3],
+        ],
+        None,
+    ])
+
+    assert obs[0].shape == (2, 5, 6)
+    assert reward[0] == 1
+    assert obs[1].shape == (2, 5, 6)
+    assert reward[1] == 0
+    assert not done
+    assert info == {}
+
+    player1_avatar1_state = get_object_state(env, 'avatar1', player=1)
+    player1_avatar2_state = get_object_state(env, 'avatar2', player=1)
+
+    assert player1_avatar1_state['Location'] == [0, 3]
+    assert player1_avatar2_state['Location'] == [4, 4]
+
+    object_names = env.game.get_object_names()
+    avatar1_id = object_names.index('avatar1')
+    avatar2_id = object_names.index('avatar2')
+
+    assert obs[0][avatar1_id, 0, 3] == 1
+    assert obs[0][avatar2_id, 4, 4] == 1
+
+    player2_avatar1_state = get_object_state(env, 'avatar1', player=2)
+    player2_avatar2_state = get_object_state(env, 'avatar2', player=2)
+
+    assert player2_avatar1_state['Location'] == [3, 3]
+    assert player2_avatar2_state['Location'] == [1, 4]
+
+    assert obs[1][avatar1_id, 3, 3] == 1
+    assert obs[1][avatar2_id, 1, 4] == 1
+
+    sample = env.action_space.sample()
+    assert len(sample) == 2
+    assert sample[0].shape == (4,)
+    assert sample[1].shape == (4,)
diff --git a/resources/games/Single-Player/GVGAI/zelda.yaml b/resources/games/Single-Player/GVGAI/zelda.yaml
index 24fe8b596..a9b0888d6 100644
--- a/resources/games/Single-Player/GVGAI/zelda.yaml
+++ b/resources/games/Single-Player/GVGAI/zelda.yaml
@@ -179,9 +179,16 @@ Actions:
         Commands:
           - spawn: attack_fire
       Dst:
-        Object: [spider, _empty]
+        Object: spider
         Commands:
           - remove: true
+    - Src:
+        Object: avatar
+        Commands:
+          - spawn: attack_fire
+      Dst:
+        Object: _empty
+
 Objects:
   - Name: avatar
     Z: 3
diff --git a/resources/games/Single-Player/GVGAI/zelda_partially_observable.yaml b/resources/games/Single-Player/GVGAI/zelda_partially_observable.yaml
index f12ec336f..34330f10b 100644
--- a/resources/games/Single-Player/GVGAI/zelda_partially_observable.yaml
+++ b/resources/games/Single-Player/GVGAI/zelda_partially_observable.yaml
@@ -193,9 +193,15 @@ Actions:
         Commands:
           - spawn: attack_fire
       Dst:
-        Object: [spider, _empty]
+        Object: spider
         Commands:
           - remove: true
+    - Src:
+        Object: avatar
+        Commands:
+          - spawn: attack_fire
+      Dst:
+        Object: _empty
 Objects:
   - Name: avatar
     Z: 3
diff --git a/resources/games/Single-Player/GVGAI/zelda_sequential.yaml b/resources/games/Single-Player/GVGAI/zelda_sequential.yaml
index ed6df165b..19cbfb550 100644
--- a/resources/games/Single-Player/GVGAI/zelda_sequential.yaml
+++ b/resources/games/Single-Player/GVGAI/zelda_sequential.yaml
@@ -179,9 +179,15 @@ Actions:
         Commands:
          - spawn: attack_fire
       Dst:
-        Object: [spider, _empty]
+        Object: spider
         Commands:
           - remove: true
+    - Src:
+        Object: avatar
+        Commands:
+          - spawn: attack_fire
+      Dst:
+        Object: _empty
 Objects:
   - Name: avatar
     Z: 3
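In all three Zelda variants, the single `Dst: Object: [spider, _empty]` behaviour is split in two so that `remove: true` applies only to spiders; attacking an empty square now just spawns `attack_fire` without attempting to remove the empty destination. A quick way to exercise the updated definition, sketched under the assumption of the standard `GymWrapperFactory` registration flow from the Griddly docs:

```python
import gym
from griddly import GymWrapperFactory

wrapper = GymWrapperFactory()
wrapper.build_gym_from_yaml('Zelda', 'Single-Player/GVGAI/zelda.yaml')

env = gym.make('GDY-Zelda-v0')
env.reset()

# With the split behaviours, an attack aimed at an empty cell only spawns
# attack_fire; a spider destination is additionally removed.
for _ in range(100):
    obs, reward, done, info = env.step(env.action_space.sample())
    if done:
        env.reset()
```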
diff --git a/src/Griddly/Core/GDY/Objects/Object.cpp b/src/Griddly/Core/GDY/Objects/Object.cpp
index 2eea27cde..9053ed051 100644
--- a/src/Griddly/Core/GDY/Objects/Object.cpp
+++ b/src/Griddly/Core/GDY/Objects/Object.cpp
@@ -478,8 +478,8 @@ bool Object::isValidAction(std::shared_ptr<Action> action) const {
   auto actionName = action->getActionName();
   auto destinationObject = action->getDestinationObject();
 
-  std::string destinationObjectName;
-  if (destinationObject == nullptr) {
+  std::string destinationObjectName = destinationObject->getObjectName();
+  if (destinationObjectName == "_empty") {
     auto width = grid_->getWidth();
     auto height = grid_->getHeight();
 
@@ -489,10 +489,6 @@ bool Object::isValidAction(std::shared_ptr<Action> action) const {
         destinationLocation.y >= height || destinationLocation.y < 0) {
       return false;
     }
-
-    destinationObjectName = "_empty";
-  } else {
-    destinationObjectName = destinationObject->getObjectName();
   }
 
   spdlog::debug("Checking preconditions for action [{0}] -> {1} -> {2}", getObjectName(), actionName, destinationObjectName);
diff --git a/tests/src/Griddly/Core/GDY/Objects/ObjectTest.cpp b/tests/src/Griddly/Core/GDY/Objects/ObjectTest.cpp
index ed5bf93fc..954869a66 100644
--- a/tests/src/Griddly/Core/GDY/Objects/ObjectTest.cpp
+++ b/tests/src/Griddly/Core/GDY/Objects/ObjectTest.cpp
@@ -1382,4 +1382,39 @@ TEST(ObjectTest, isValidActionNotDefinedForDestination) {
   verifyMocks(mockActionPtr);
 }
 
+TEST(ObjectTest, isValidActionDestinationLocationOutsideGrid) {
+  auto srcObjectName = "srcObject";
+  auto dstObjectName = "_empty";
+  auto actionName = "action";
+
+  auto mockGridPtr = mockGrid();
+
+  EXPECT_CALL(*mockGridPtr, getWidth).WillRepeatedly(Return(10));
+  EXPECT_CALL(*mockGridPtr, getHeight).WillRepeatedly(Return(10));
+
+  auto srcObject = std::shared_ptr<Object>(new Object(srcObjectName, 'S', 0, 0, {{"counter", _V(5)}}, nullptr));
+
+  auto dstObjectOutside = std::shared_ptr<Object>(new Object(dstObjectName, 'S', 0, 0, {}, nullptr));
+  auto dstObjectInside = std::shared_ptr<Object>(new Object(dstObjectName, 'D', 0, 0, {}, nullptr));
+
+  srcObject->init({5, 4}, DiscreteOrientation(), mockGridPtr);
+
+  dstObjectOutside->init({-1, -1}, DiscreteOrientation(), mockGridPtr);
+  dstObjectInside->init({5, 5}, DiscreteOrientation(), mockGridPtr);
+
+  auto mockActionPtrInvalid = setupAction(actionName, srcObject, dstObjectOutside);
+  auto mockActionPtrValid = setupAction(actionName, srcObject, dstObjectInside);
+
+  srcObject->addActionSrcBehaviour(actionName, dstObjectName, "nop", {}, {});
+
+  auto preconditionResultInvalid = srcObject->isValidAction(mockActionPtrInvalid);
+  auto preconditionResultValid = srcObject->isValidAction(mockActionPtrValid);
+
+  ASSERT_FALSE(preconditionResultInvalid);
+  ASSERT_TRUE(preconditionResultValid);
+
+  verifyMocks(mockActionPtrInvalid);
+  verifyMocks(mockActionPtrValid);
+}
+
 }  // namespace griddly
\ No newline at end of file
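`isValidAction` now resolves the destination object's name first and bounds-checks `_empty` destinations, so an action whose destination falls outside the grid fails its preconditions before any behaviour runs. From Python this should surface as a no-op step; a hedged sketch, where `LEFT` is a hypothetical action id (real ids come from the GDY action mappings):

```python
import gym
from griddly import GymWrapperFactory

wrapper = GymWrapperFactory()
wrapper.build_gym_from_yaml('ZeldaBounds', 'Single-Player/GVGAI/zelda.yaml')

env = gym.make('GDY-ZeldaBounds-v0')
env.reset()

# Once the avatar reaches the left edge, a further left move resolves to a
# destination outside the grid; isValidAction rejects it and the avatar
# simply stays put instead of the step raising an error.
LEFT = 1  # hypothetical action id, for illustration only
for _ in range(10):
    obs, reward, done, info = env.step(LEFT)
```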