fixed the publisher bug
parent 0de3eaa360
commit eee0d0e8e1
@@ -48,7 +48,7 @@ class ActiveBOService(Node):
             for j in range(nr_episodes):
 
                 # active part
-                if (j > 3) and (np.random.uniform(0.0, 1.0, 1) < epsilon):
+                if (j > 0) and (np.random.uniform(0.0, 1.0, 1) < epsilon):
                     self.get_logger().info('Active User Input')
 
                     old_policy, _, old_weights = BO.get_best_result()
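Note: relaxing the condition from `j > 3` to `j > 0` lets the epsilon-greedy active query fire from the second episode onward instead of waiting until episode 4. A minimal standalone sketch of this query rule; `query_user`, `bo_step`, `epsilon`, and `nr_episodes` here are hypothetical stand-ins, not names from this repo:

```python
import numpy as np

# Sketch of the epsilon-greedy active-query rule used in the hunk above.
epsilon = 0.2
nr_episodes = 10

def query_user(j):
    print(f"episode {j}: asking the user for input")

def bo_step(j):
    print(f"episode {j}: regular BO step")

for j in range(nr_episodes):
    # after the change, any episode past the first can trigger a user query
    if (j > 0) and (np.random.uniform(0.0, 1.0, 1) < epsilon):
        query_user(j)
    else:
        bo_step(j)
```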
@@ -63,6 +63,8 @@ class ActiveBOService(Node):
                     x_next = BO.next_observation()
                     BO.eval_new_observation(x_next)
 
+                self.get_logger().info(str(j))
+
             best_policy[:, i], best_pol_reward[:, i], best_weights[:, i] = BO.get_best_result()
 
             reward[:, i] = BO.best_reward.T
@@ -40,10 +40,12 @@ class ActiveRLService(Node):
         self.distance_penalty = 0
 
     def active_rl_eval_callback(self, response):
-        if not self.eval_response_received_first:
-            self.eval_response_received_first = True
-            self.get_logger().info('/active_rl_eval_response connected!')
-        else:
+        # if not self.eval_response_received_first:
+        #     self.eval_response_received_first = True
+        #     self.get_logger().info('/active_rl_eval_response connected!')
+        # else:
+        #     self.eval_response = response
+        #     self.eval_response_received = True
         self.eval_response = response
         self.eval_response_received = True
 
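With the first-response handshake commented out, the callback now stores every incoming message unconditionally. A minimal sketch of the resulting subscriber shape, assuming an rclpy node and using `std_msgs/String` as a placeholder for the repo's actual response message type:

```python
import rclpy
from rclpy.node import Node
from std_msgs.msg import String  # placeholder; the real message type is repo-specific

class EvalClient(Node):
    def __init__(self):
        super().__init__('eval_client')
        self.eval_response = None
        self.eval_response_received = False
        self.sub = self.create_subscription(
            String, '/active_rl_eval_response', self.active_rl_eval_callback, 10)

    def active_rl_eval_callback(self, response):
        # store every message; no special-casing of the first one
        self.eval_response = response
        self.eval_response_received = True
```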
@@ -92,7 +94,7 @@ class ActiveRLService(Node):
         self.eval_pub.publish(eval_request)
 
         while not self.eval_response_received:
-            time.sleep(0.1)
+            rclpy.spin_once(self)
 
         self.get_logger().info('Topic responded!')
         new_policy = self.eval_response.policy
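This hunk is the fix the commit message refers to: `time.sleep(0.1)` blocked the thread without ever letting the node process incoming messages, so the subscriber callback never ran and `eval_response_received` stayed false forever. `rclpy.spin_once(self)` services the node's callbacks while waiting. A minimal sketch of the publish-then-spin-until-response pattern, with placeholder topic names and `std_msgs/String` standing in for the real message types:

```python
import rclpy
from rclpy.node import Node
from std_msgs.msg import String  # placeholder message type

class RequestNode(Node):
    def __init__(self):
        super().__init__('request_node')
        self.response_received = False
        self.pub = self.create_publisher(String, '/request', 10)
        self.sub = self.create_subscription(
            String, '/response', self.on_response, 10)

    def on_response(self, msg):
        self.response_received = True

    def send_and_wait(self):
        self.pub.publish(String(data='hello'))
        # spin_once drives the executor so on_response can actually fire;
        # a plain time.sleep() here would wait forever, because callbacks
        # are never delivered while the thread just sleeps
        while not self.response_received:
            rclpy.spin_once(self, timeout_sec=0.1)

if __name__ == '__main__':
    # note: a responder publishing on /response must be running for this to terminate
    rclpy.init()
    node = RequestNode()
    node.send_and_wait()
    node.destroy_node()
    rclpy.shutdown()
```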
@@ -103,6 +105,7 @@ class ActiveRLService(Node):
         reward = 0
         step_count = 0
         done = False
+        self.env.reset()
 
         for i in range(len(new_policy)):
             action = new_policy[i]
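The added `self.env.reset()` makes each policy rollout start from the environment's initial state instead of wherever the previous rollout stopped. A minimal sketch of the reset-then-rollout pattern; `DummyEnv` is a hypothetical stand-in for the repo's actual environment interface:

```python
# Sketch: reset before every rollout so episodes don't leak state.
class DummyEnv:
    def __init__(self):
        self.state = 0.0

    def reset(self):
        self.state = 0.0
        return self.state

    def step(self, action):
        self.state += action
        done = abs(self.state) > 10.0
        reward = -abs(self.state)
        return self.state, reward, done

def rollout(env, policy):
    env.reset()  # without this, the rollout continues from stale state
    total_reward, done = 0.0, False
    for action in policy:
        if done:
            break
        _, reward, done = env.step(action)
        total_reward += reward
    return total_reward

print(rollout(DummyEnv(), [0.5, -0.2, 1.0]))
```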