From eee0d0e8e1d83b3311b01e42dfbb70a36f900eb9 Mon Sep 17 00:00:00 2001
From: Niko
Date: Mon, 27 Mar 2023 12:24:55 +0200
Subject: [PATCH] fixed the publisher bug

---
 .../active_bo_ros/active_bo_service.py |  4 +++-
 .../active_bo_ros/active_rl_service.py | 17 ++++++++++-------
 2 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/src/active_bo_ros/active_bo_ros/active_bo_service.py b/src/active_bo_ros/active_bo_ros/active_bo_service.py
index b37a159..e8d20f7 100644
--- a/src/active_bo_ros/active_bo_ros/active_bo_service.py
+++ b/src/active_bo_ros/active_bo_ros/active_bo_service.py
@@ -48,7 +48,7 @@ class ActiveBOService(Node):
 
             for j in range(nr_episodes):
                 # active part
-                if (j > 3) and (np.random.uniform(0.0, 1.0, 1) < epsilon):
+                if (j > 0) and (np.random.uniform(0.0, 1.0, 1) < epsilon):
                     self.get_logger().info('Active User Input')
 
                     old_policy, _, old_weights = BO.get_best_result()
@@ -63,6 +63,8 @@ class ActiveBOService(Node):
                     x_next = BO.next_observation()
                     BO.eval_new_observation(x_next)
 
+                self.get_logger().info(str(j))
+
             best_policy[:, i], best_pol_reward[:, i], best_weights[:, i] = BO.get_best_result()
 
             reward[:, i] = BO.best_reward.T
diff --git a/src/active_bo_ros/active_bo_ros/active_rl_service.py b/src/active_bo_ros/active_bo_ros/active_rl_service.py
index c2ce59b..1c07a31 100644
--- a/src/active_bo_ros/active_bo_ros/active_rl_service.py
+++ b/src/active_bo_ros/active_bo_ros/active_rl_service.py
@@ -40,12 +40,14 @@ class ActiveRLService(Node):
         self.distance_penalty = 0
 
     def active_rl_eval_callback(self, response):
-        if not self.eval_response_received_first:
-            self.eval_response_received_first = True
-            self.get_logger().info('/active_rl_eval_response connected!')
-        else:
-            self.eval_response = response
-            self.eval_response_received = True
+        # if not self.eval_response_received_first:
+        #     self.eval_response_received_first = True
+        #     self.get_logger().info('/active_rl_eval_response connected!')
+        # else:
+        #     self.eval_response = response
+        #     self.eval_response_received = True
+        self.eval_response = response
+        self.eval_response_received = True
 
     def active_rl_callback(self, request, response):
 
@@ -92,7 +94,7 @@ class ActiveRLService(Node):
             self.eval_pub.publish(eval_request)
 
             while not self.eval_response_received:
-                time.sleep(0.1)
+                rclpy.spin_once(self)
 
             self.get_logger().info('Topic responded!')
             new_policy = self.eval_response.policy
@@ -103,6 +105,7 @@ class ActiveRLService(Node):
             reward = 0
             step_count = 0
             done = False
+            self.env.reset()
 
             for i in range(len(new_policy)):
                 action = new_policy[i]