Commit

cleaning imports
Ro0t-set committed Jan 26, 2024
1 parent aa2e330 commit d6296ee
Showing 10 changed files with 1,828 additions and 1,426 deletions.
3,216 changes: 1,808 additions & 1,408 deletions log.csv

Large diffs are not rendered by default.

Binary file modified src/base_model.zip
Binary file not shown.
Binary file removed src/base_model_2.zip
Binary file not shown.
1 change: 1 addition & 0 deletions src/train.py
@@ -32,6 +32,7 @@ def train(
eval_env.set_map_path(path)
eval_env.seed(1773449316)
eval_env.set_optimize_speed(optimize_speed)


timesteps_list = np.logspace(np.log10(min_timesteps), np.log10(max_timesteps), num=num_of_steps, endpoint=True, base=10.0, dtype=int, axis=0)
learning_rate_list = np.logspace(np.log10(max_learning_rate), np.log10(min_learning_rate), num=num_of_steps, endpoint=True, base=10.0, dtype=None, axis=0)
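For context, the two np.logspace calls above build log-uniform schedules: the timestep budget grows from min_timesteps to max_timesteps while the learning rate decays from max_learning_rate to min_learning_rate over the same number of stages. The following is only a minimal sketch of what they produce, assuming illustrative bounds (the real values are arguments of train()):

    import numpy as np

    # Illustrative bounds only; the actual values are passed into train().
    min_timesteps, max_timesteps = 10_000, 1_000_000
    max_learning_rate, min_learning_rate = 1e-3, 1e-5
    num_of_steps = 5

    timesteps_list = np.logspace(np.log10(min_timesteps), np.log10(max_timesteps),
                                 num=num_of_steps, endpoint=True, base=10.0, dtype=int)
    learning_rate_list = np.logspace(np.log10(max_learning_rate), np.log10(min_learning_rate),
                                     num=num_of_steps, endpoint=True, base=10.0)

    print(timesteps_list)      # [  10000   31622  100000  316227 1000000]
    print(learning_rate_list)  # roughly [1e-03 3.16e-04 1e-04 3.16e-05 1e-05]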
33 changes: 16 additions & 17 deletions src/wrapper/wrapper.py
@@ -1,13 +1,9 @@
import gym
import numpy as np
#from gym import spaces
from gym import spaces
from pathlib import Path
import yaml
from argparse import Namespace
from pyglet.gl import GL_POINTS
import utility.map_utility as map_utility
from typing import Any, Dict, List, Optional, SupportsFloat, Tuple, Union
from typing import List
import csv

def logger(map, event, reword, lap_time):
@@ -18,6 +14,7 @@ def logger(map, event, reword, lap_time):
with open('log.csv', 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow([map, event, reword, lap_time])


def convert_range(value, input_range, output_range):
# converts value(s) from range to another range
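The body of convert_range is not part of this hunk; the sketch below is only an assumed, typical linear mapping that matches the signature and comment above, not necessarily the repository's implementation:

    def convert_range(value, input_range, output_range):
        # Assumed: map value linearly from [in_min, in_max] onto [out_min, out_max].
        (in_min, in_max), (out_min, out_max) = input_range, output_range
        return (value - in_min) * (out_max - out_min) / (in_max - in_min) + out_min

    # e.g. convert_range(0.5, (0.0, 1.0), (-1.0, 1.0)) -> 0.0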
@@ -59,10 +56,12 @@ def __init__(self, env, random_map=False):
self.start_radius = (self.track_width / 2) - ((self.car_length + self.car_width) / 2) # just extra wiggle room

self.step_count = 0
self.count = 0
self.step_for_episode = 0

# set threshold for maximum angle of car, to prevent spinning
self.max_theta = 100
self.count = 0


self.map_path = None
self.random_map = random_map
@@ -73,13 +72,9 @@ def __init__(self, env, random_map=False):
self.race_line_theta = []

self.episode_returns = []

self.is_rendering = False

self.last_position = {'x': None, 'y': None}

self.number_of_base_reward_give = 10

self.one_lap_done = False

def get_total_steps(self) -> int:
@@ -134,6 +129,7 @@ def episode_end(reason = None, rew = 0):
done = True
self.count = 0
self.episode_returns = []
self.step_for_episode = 0
self.one_lap_done = False
return done, rew

@@ -189,21 +185,22 @@ def episode_end(reason = None, rew = 0):
else:
steps_goal = self.count
if not self.one_lap_done:
steps_done = len(self.episode_returns)
steps_done = self.step_for_episode
elif self.one_lap_done:
steps_done = len(self.episode_returns) / 2
steps_done = self.step_for_episode / 2

k = (steps_done - steps_goal)/steps_goal

reward += (1-k) * 100

print("----------------- Lap Done ----------------->", self.map_path, len(self.episode_returns) * 0.01, reward)
print("----------------- Lap Done ----------------->", self.map_path, self.step_for_episode * 0.01, reward)

self.count = 0

if self.one_lap_done:
logger(self.map_path, "lap_done", sum(self.episode_returns), len(self.episode_returns) * 0.01)
logger(self.map_path, "lap_done", sum(self.episode_returns), self.step_for_episode * 0.01)
self.episode_returns = []
self.step_for_episode = 0
self.one_lap_done = False
else:
self.one_lap_done = True
@@ -214,16 +211,17 @@ def episode_end(reason = None, rew = 0):


if observation['collisions'][0]:
logger(self.map_path, "collisions", sum(self.episode_returns), len(self.episode_returns) * 0.01)
logger(self.map_path, "collisions", sum(self.episode_returns), self.step_for_episode * 0.01)
done, reward = episode_end(rew = -30)



if len(self.episode_returns) > 50_000:
logger(self.map_path, "too_slow", sum(self.episode_returns), len(self.episode_returns) * 0.01)
if self.step_for_episode > 50_000:
logger(self.map_path, "too_slow", sum(self.episode_returns), self.step_for_episode * 0.01)
done, reward = episode_end("Too long", -10)

self.episode_returns.append(reward)
self.step_for_episode += 1


return self.normalise_observations(observation['scans'][0]), reward, bool(done), info
@@ -262,6 +260,7 @@ def reset(self):
# x, y, t = self.start_position()

self.episode_returns = []
self.step_for_episode = 0

self.last_position = {'x': x, 'y': y}

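The reward hunks above replace len(self.episode_returns) with the new step_for_episode counter when computing the lap-completion bonus and the logged lap time (one step = 0.01 s). A standalone sketch of that bonus with assumed numbers, where steps_goal stands in for self.count:

    def lap_bonus(step_for_episode, steps_goal, one_lap_done=False):
        # After the first lap the counter spans two laps, so halve it.
        steps_done = step_for_episode / 2 if one_lap_done else step_for_episode
        k = (steps_done - steps_goal) / steps_goal
        return (1 - k) * 100  # added to the step reward when a lap is completed

    # e.g. 900 steps against a 1000-step goal: k = -0.1, bonus = 110.0
    print(lap_bonus(900, 1000))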
Binary file modified train_test/best_global_model.zip
Binary file not shown.
Binary file modified train_test/best_model.zip
Binary file not shown.
Binary file modified train_test/evaluations.npz
Binary file not shown.
2 changes: 1 addition & 1 deletion train_test/mean_reward.txt
@@ -1 +1 @@
59.53730455
307.52656115
2 changes: 2 additions & 0 deletions train_test/monitor.csv
@@ -0,0 +1,2 @@
#{"t_start": 1706261953.0117178, "env_id": "f110-v0"}
r,l,t
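The new monitor.csv appears to use the Stable-Baselines3 Monitor format: a JSON metadata line, then one row per episode with reward r, length l (in steps), and elapsed time t (in seconds). A small reading sketch, assuming pandas is available and episode rows have been appended:

    import pandas as pd

    # skiprows=1 skips the #{"t_start": ..., "env_id": ...} metadata line.
    df = pd.read_csv("train_test/monitor.csv", skiprows=1)
    print(df[["r", "l", "t"]].describe())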
