Skip to content

Commit a6343bc

Browse files
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent b7c0b1a commit a6343bc

File tree

14 files changed

+7
-17
lines changed

14 files changed

+7
-17
lines changed

neodroidagent/agents/numpy_agents/model_based/dyna_agent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -363,7 +363,7 @@ def _simulate_behavior(self):
363363
pairs = env_model.state_action_pairs_leading_to_outcome(s_sim)
364364

365365
# add predecessors to queue if their priority exceeds thresh
366-
for (_s, _a) in pairs:
366+
for _s, _a in pairs:
367367
self._update_queue(_s, _a)
368368

369369
def _update(self, s, a):

neodroidagent/agents/numpy_agents/model_free/tabular_q_agent.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@ class TabularQAgent(NumpyAgent):
1919
# region Private
2020

2121
def __defaults__(self) -> None:
22-
2322
self._action_n = 6
2423

2524
self._init_mean = 0.0
@@ -195,7 +194,6 @@ def _optimise_wrt(self, error, *args, **kwargs) -> None:
195194
pass
196195

197196
def __build__(self, env, **kwargs) -> None:
198-
199197
if hasattr(self._last_connected_environment.action_space, "num_binary_actions"):
200198
self._action_n = (
201199
self._last_connected_environment.action_space.num_binary_actions

neodroidagent/common/memory/transitions/transitions.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,8 @@ def non_terminal_numerical(self):
8888
@dataclass
8989
class ValuedTransitionPoint(TransitionPoint):
9090
"""
91-
__slots__=['state','action','successor_state','signal','terminal',"distribution","value_estimate"]"""
91+
__slots__=['state','action','successor_state','signal','terminal',"distribution","value_estimate"]
92+
"""
9293

9394
__slots__ = TransitionPoint.__slots__ + ["distribution", "value_estimate"]
9495
state: Any

neodroidagent/common/session_factory/vertical/procedures/rollout_inference.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,6 @@ def __call__(
4242
:return:"""
4343
with torch.no_grad():
4444
with TensorBoardPytorchWriter(log_directory) as metric_writer:
45-
4645
B = tqdm(count(), f"step {0}, {iterations}", leave=False)
4746

4847
for _ in B:

neodroidagent/common/session_factory/vertical/procedures/training/experimental/curriculum/curriculum_generation.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,6 @@ def main():
7878
_environment.configure(state=init_state)
7979
episode_reward = 0
8080
for k in count(1):
81-
8281
# actions = _environment.action_space.sample()
8382
observations, signal, terminated, info = _environment.sample()
8483

neodroidagent/common/session_factory/vertical/procedures/training/off_policy_batched.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,6 @@ def __call__(
7373
disable=disable_stdout,
7474
desc="Step #",
7575
):
76-
7776
sample = self.agent.sample(state)
7877
action = self.agent.extract_action(sample)
7978
snapshot = self.environment.react(action)

neodroidagent/common/session_factory/vertical/procedures/training/off_policy_step_wise.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,6 @@ def __call__(
5555

5656
it = tqdm(range(num_environment_steps), desc="Step #", leave=False)
5757
for step_i in it:
58-
5958
sample = self.agent.sample(state)
6059
action = self.agent.extract_action(sample)
6160

neodroidagent/common/session_factory/vertical/procedures/training/sampling/rollout.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,6 @@ def infer(self, env, render=True):
2626
state = env.reset()
2727

2828
for frame_i in count(1):
29-
3029
action, *_ = self.sample(state)
3130
state, signal, terminated, info = env.act(action)
3231
if render:

neodroidagent/common/session_factory/vertical/single_agent_environment_session.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,6 @@ def __call__(
7373
# with ContextWrapper(torchsnooper.snoop, debug):
7474
if True:
7575
with ContextWrapper(torch.autograd.detect_anomaly, debug):
76-
7776
if agent is None:
7877
raise NoAgent
7978

@@ -135,7 +134,6 @@ def __call__(
135134
drawer_type,
136135
not train_agent and drawing_mode != DrawingModeEnum.none,
137136
) as drawer_instance:
138-
139137
agent.build(
140138
self._environment.observation_space,
141139
self._environment.action_space,

neodroidagent/entry_points/session_factory.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,8 @@ def session_factory(
3737
):
3838
r"""
3939
Entry point for starting a training session, with the functionality of parsing cmdline arguments and
40-
confirming configuration to use before training and overwriting of default training configurations"""
40+
confirming configuration to use before training and overwriting of default training configurations
41+
"""
4142

4243
if config is None:
4344
config = {}

0 commit comments

Comments
 (0)