-
Notifications
You must be signed in to change notification settings - Fork 0
/
search.py
130 lines (88 loc) · 3.82 KB
/
search.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
from constants import *
from node import Node
from priorityqueue import PriorityQueue
import pygame
class SnakeProblem:
    """Search-problem formulation for a snake moving on a wrap-around grid.

    States are (x, y) position tuples; the initial state is the snake's head
    and the goal is the food position taken from the maze.
    """
    def __init__(self, body, maze, mapsize, cur_dir, grid, time = None):
        # body      : list of (x, y) cells, head first
        # maze      : provides foodpos and playerpos (both snakes' cells)
        # mapsize   : (width, height) used for toroidal wrap-around
        # cur_dir   : the snake's current heading
        # grid      : indexable by (x, y); value 1 marks an obstacle,
        #             -1 apparently marks an already-explored cell (see heuristic)
        # time      : optional time budget (ms) consumed by Search
        self.body = body
        self.initial = body[0]
        self.goal = maze.foodpos
        self.maze = maze
        self.mapsize = mapsize
        self.grid = grid
        self.cur_dir = cur_dir
        # From any heading the snake may go straight or turn 90 degrees,
        # but never reverse onto itself.
        self.possible_actions = { up: [left, up, right],
                                  down: [left, down, right],
                                  left: [up, down, left],
                                  right: [down, up, right] }
        self.time = time
        # Cells treated as occupied by the adversary: its current body plus
        # every cell its head could move into on the next tick.
        self.playerpos = self.maze.playerpos + self.get_adv_next_p_head()

    # Actions are directions; part of the obstacle-avoidance logic lives here.
    def actions(self, state, last_action):
        """Return the directions applicable in `state`, given the previous
        action, excluding walls (grid == 1) and adversary-occupied cells."""
        valid = []
        for n_dir in self.possible_actions[last_action]:
            nxt = self.result(state, n_dir)  # compute the successor once, not twice
            if self.grid[nxt] != 1 and nxt not in self.playerpos:
                valid.append(n_dir)
        return valid

    def result(self, state, action):
        """Successor position after moving `action`; wraps around the map edges."""
        return ( (state[0] + action[0]) % self.mapsize[0], (state[1] + action[1]) % self.mapsize[1] )

    def goal_test(self, state):
        """True when `state` is the food position."""
        return state == self.goal

    def path_cost(self, c, state1, action, state2):
        # Uniform cost: every step costs 1.
        return c + 1

    # Manhattan distance adapted to the toroidal map.
    def heuristic(self, node):
        """Estimate of the remaining cost from `node` to the food.

        Base term is the wrap-aware Manhattan distance; two tiny penalties
        bias tie-breaking: dir_h discourages turning (keeps the snake going
        straight) and exp_h discourages re-entering explored cells
        (grid == -1). The (1 + 1/30) factor slightly inflates the heuristic,
        trading optimality for speed (weighted A*).
        """
        dx = abs(self.goal[0] - node.state[0])
        dy = abs(self.goal[1] - node.state[1])
        # Penalise changes of direction so the snake prefers straight lines.
        dir_h = (node.action != node.parent.action) * 0.0001 if node.parent else 0
        # Penalise cells already marked as explored in the grid.
        exp_h = (self.grid[node.state] == -1)*0.001
        return (min(dx, self.mapsize[0] - dx) + min(dy, self.mapsize[1] - dy) + exp_h + dir_h )* (1 + 1/30)

    def get_adv_next_p_head(self):
        """Return the cells the adversary's head may occupy on the next tick."""
        player_pos = self.maze.playerpos
        # maze.playerpos concatenates both snakes' bodies; slice out ours to
        # isolate the adversary's body (assumes equal body lengths — the head
        # comparison decides which half is ours). TODO confirm against caller.
        if self.body[0] == player_pos[0]:
            adv_body = player_pos[len(self.body):]
        else:
            adv_body = player_pos[:len(self.body)]
        adv_head = adv_body[0]
        # One possible head position per direction (renamed from `dir`,
        # which shadowed the builtin).
        return [self.result(adv_head, direction) for direction in directions]
class Search:
    """Time-bounded weighted A* (best-first graph search) over a SnakeProblem."""
    def __init__(self, problem):
        self.explored = set()
        # Evaluation function f(n) = g(n) + h(n): cost so far plus heuristic.
        self.f = lambda n: n.path_cost + problem.heuristic(n)
        self.problem = problem

    # Collision detection against other agents (and the snake itself, via
    # problem.actions) is handled during expansion here.
    def search(self, start):
        """Run A* from the problem's initial state.

        `start` is a pygame tick timestamp (ms); once 90% of the problem's
        time budget has elapsed the search aborts and returns the best node
        popped so far. Returns the goal node on success, the current best
        node on timeout, or None if the frontier empties without a solution.
        """
        node = Node(self.problem.initial, action=self.problem.cur_dir)
        if self.problem.goal_test(node.state):
            return node
        frontier = PriorityQueue(self.f)
        frontier.append(node)
        # Loop-invariant: the adversary's predicted head cells do not change
        # during one search, so compute them once instead of per child.
        adv_next_heads = self.problem.get_adv_next_p_head()
        while frontier:
            node = frontier.pop()
            finish = pygame.time.get_ticks()
            # Abort when 90% of the time budget is spent. Guard against a
            # missing budget: problem.time defaults to None, and the original
            # comparison raised TypeError in that case.
            if self.problem.time is not None and finish - start >= 0.9 * self.problem.time:
                frontier.append(node)
                return node
            if self.problem.goal_test(node.state):
                return node
            self.explored.add(node.state)
            for child in node.expand(self.problem):
                # Only the first step (path_cost == 1) can collide with the
                # adversary's next head position; deeper steps happen later.
                if child.path_cost == 1 and child.state in adv_next_heads:
                    continue
                if child.state not in self.explored and child not in frontier:
                    frontier.append(child)
                elif child in frontier:
                    # Duplicate state already queued: keep whichever copy has
                    # the lower evaluation.
                    incumbent = frontier[child]
                    if self.f(child) < self.f(incumbent):
                        del frontier[incumbent]
                        # BUG FIX: the original deleted the incumbent but never
                        # re-inserted the cheaper child, silently losing the
                        # improved path. Standard best-first search appends it.
                        frontier.append(child)
        return None