-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun_example.py
More file actions
240 lines (182 loc) · 7.18 KB
/
run_example.py
File metadata and controls
240 lines (182 loc) · 7.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
from regresser import build_regression_list
from sgpolicy import generate_sgpolicy, quality_func_action_count, quality_func_time_estimate, quality_func_min_idle
from temporal_policy import TempPolicy
from simulator import simulate_file
from linearizer import count_linearizations
from pop import *
from krrt.utils import get_opts
from krrt.planning.strips import progress_state
import os, time, random
def build_policy(filename):
    """Load a custom POP from *filename* and build a temporal policy for it.

    Prints summary statistics (linearization count, action count, candidate
    contexts and their mappings) as a side effect.

    Returns a (TempPolicy, POP) pair.
    """
    P = POP()
    P.load_custom_pop(filename)

    print("Linearizations: %d\n" % count_linearizations(P))

    # Regress the goal back through the plan to obtain the suffix structure
    # the state-based policy is generated from.
    (suffixes, precset_mapping, apsp_mapping) = build_regression_list(P)

    # Tally candidate (suffix, action) contexts and their mappings for the
    # summary statistics below.
    num_apsps = 0
    total_apsps = 0
    for s in apsp_mapping.keys():
        for a in apsp_mapping[s].keys():
            num_apsps += 1
            total_apsps += sum([len(d) for d in apsp_mapping[s][a].values()])

    print("\nNumber of actions: %d" % len(P.A_map))
    print("Number of candidates: %d" % num_apsps)
    print("Total mappings: %d" % total_apsps)
    # Guard the average: the original divided unconditionally, raising
    # ZeroDivisionError whenever the mapping is empty.
    if num_apsps > 0:
        print("Avg. mappings per context: %.2f\n" % (float(total_apsps) / float(num_apsps)))

    pol = TempPolicy(generate_sgpolicy(suffixes, precset_mapping, quality_func_min_idle), apsp_mapping, P)

    return (pol, P)
def run_simulation(filename, pol, P):
    """Replay the recorded simulation in *filename* against policy *pol*.

    Thin wrapper that forwards the plan's action and fluent maps to the
    simulator.
    """
    simulate_file(pol, filename, P.A_map, P.F_map)
def run_static(pol, P, mode, silent = False, alter_state = None, alter_settings = None):
    """Step policy *pol* forward from the initial state of plan *P*.

    mode selects how the clock advances after each action: 'lower', 'upper',
    or 'mid' use the lower bound, upper bound, or midpoint of the action's
    temporal window.  alter_state is an exogenous-dynamics hook invoked after
    every state progression (defaults to static_dynamics, i.e. no changes);
    alter_settings is the mutable settings dict handed to that hook.

    Returns (success, final_time, step_count, n_changes, replan_count);
    the numeric fields are -1 on failure.
    """
    if not silent:
        print("\nRunning static simulation.\n")

    dynamics = alter_state or static_dynamics
    settings = alter_settings or {}
    # The dynamics hooks consult these flags before making the agent hungry
    # or dirtying the laundry.
    settings['laundryok'] = True
    settings['fullok'] = True

    current_state = P.init.adds
    current_time = 0.0
    pol.reset()

    going = True
    n_changes = 0
    count = 0

    while going:
        count += 1
        if count > 500:
            # Safety valve: the policy is cycling through actions.
            if not silent:
                print("!! Execution failed: loop of actions. !!")
            return (False, -1, -1, n_changes, -1)

        res = pol.get_action(current_time, current_state)
        if not res:
            if not silent:
                print("!! Execution failed: no action available. !!")
            return (False, -1, -1, n_changes, -1)

        (act, l, u) = res

        # Domain-specific toggles consumed by the exogenous dynamics.
        if 'startA_do_laundry' == act.operator:
            settings['laundryok'] = False
        elif 'check_at_home' == act.operator:
            settings['laundryok'] = True
        elif 'goto_sleep' == act.operator:
            settings['laundryok'] = False
            settings['fullok'] = False

        if not silent:
            print("Execute %s between %.2f and %.2f" % (str(act), l, u))

        if act == P.goal:
            # Goal action: stop after this iteration; no progression needed.
            going = False
        else:
            if not (act.precond <= current_state):
                if not silent:
                    print("!! Execution failed: action %s is not applicable. !!" % str(act))
                return (False, -1, -1, n_changes, -1)
            current_state = progress_state(current_state, act)
            n_changes += dynamics(current_state, P.F_map, silent, settings)

        # Advance the clock by the selected end of the temporal window.
        if 'lower' == mode:
            current_time += l
        elif 'upper' == mode:
            current_time += u
        elif 'mid' == mode:
            current_time += (l + u) / 2

        if not silent:
            print("Executing %s at %d:%02d (%f)\n\n-------------\n" % (str(act), int(current_time/60), int(current_time % 60), current_time))

        pol.add_action(current_time, act)
        current_time += TemporalConstraint.epsilon

    if ('stn' == pol.mode) and (not (P.goal.precond <= current_state)):
        if not silent:
            print("!! Execution failed: goal doesn't hold at the end of execution. !!")
        return (False, -1, -1, n_changes, -1)

    if not silent:
        # NOTE(review): l here is the lower bound of the final (goal) action's
        # window, left over from the last loop iteration — confirm intended.
        print("\nGoal reached (%.2f)!" % (current_time + l))

    return (True, current_time, count, n_changes, pol.replan_count)
def static_dynamics(s, fmap, silent, settings):
    """No-op dynamics hook: the world never changes, so always 0 changes."""
    return 0
def general_dynamics(s, fmap, silent, settings):
    """Randomly perturb state *s* in place; return how many changes fired.

    Bad events (becoming hungry, dirtying the laundry or kitchen) fire with
    the probabilities in *settings*, gated by the 'fullok' / 'laundryok'
    flags.  Good events add their fluent with their own probability; becoming
    full additionally clears hunger.
    """
    full = fmap['f7']
    hungry = fmap['f6']
    laundry = fmap['f8']
    clean = fmap['f9']
    read = fmap['f11']
    movie = fmap['f12']
    groc = fmap['f10']

    num_changed = 0

    # Handle the bad things
    if settings['fullok'] and (full in s) and (random.random() < settings['prob_hungry']):
        if not silent:
            print("-- Randomly becoming hungry.")
        s.remove(full)
        s.add(hungry)
        num_changed += 1

    if settings['laundryok'] and (laundry in s) and (random.random() < settings['prob_unlaundry']):
        if not silent:
            print("-- Randomly dirtying the laundry.")
        s.remove(laundry)
        num_changed += 1

    if (clean in s) and (random.random() < settings['prob_unclean']):
        if not silent:
            print("-- Randomly dirtying the kitchen.")
        s.remove(clean)
        num_changed += 1

    # Handle the good things
    good_events = [(laundry, settings['prob_laundry']),
                   (read, settings['prob_read']),
                   (movie, settings['prob_movie']),
                   (groc, settings['prob_groc']),
                   (full, settings['prob_full'])]
    for (fluent, prob) in good_events:
        if random.random() < prob:
            if not silent:
                print("-- Randomly add %s." % str(fluent))
            s.add(fluent)
            # Becoming full also satisfies hunger.
            if (fluent == full) and (hungry in s):
                s.remove(hungry)
            num_changed += 1

    return num_changed
def pessimistic_dynamics(s, fmap, silent, settings):
    """Adversarial dynamics hook: only bad events can fire.

    Becoming hungry and dirtying the laundry occur with probabilities read
    from *settings* ('prob_hungry' / 'prob_unlaundry', defaulting to 0.2 and
    0.3), gated by the 'fullok' / 'laundryok' flags.  Mutates *s* in place
    and returns the number of changes applied.
    """
    full = fmap['f7']
    hungry = fmap['f6']
    laundry = fmap['f8']

    num_changed = 0
    p_hungry = settings.get('prob_hungry', 0.2)
    p_dirty = settings.get('prob_unlaundry', 0.3)

    if settings['fullok'] and (full in s) and (random.random() < p_hungry):
        if not silent:
            print("-- Randomly becoming hungry.")
        s.remove(full)
        s.add(hungry)
        num_changed += 1

    if settings['laundryok'] and (laundry in s) and (random.random() < p_dirty):
        if not silent:
            print("-- Randomly dirtying the laundry.")
        s.remove(laundry)
        num_changed += 1

    return num_changed
if __name__ == '__main__':
    # Command-line entry point: -plan <file> is required, -file <trace> is
    # optional and replays a recorded simulation after the static run.
    myargs, flags = get_opts()

    if '-plan' not in myargs:
        print("Error: Must choose a file for the STPOP (-plan)")
        # sys.exit (unlike the original os._exit) lets the interpreter flush
        # stdio and run cleanup handlers before terminating with status 1.
        import sys
        sys.exit(1)

    print("")

    (pol, P) = build_policy(myargs['-plan'])
    run_static(pol, P, 'mid')

    if '-file' in myargs:
        run_simulation(myargs['-file'], pol, P)

    print("")