diff --git a/src/active_bo_ros/active_bo_ros/active_rl_service.py b/src/active_bo_ros/active_bo_ros/active_rl_service.py
index a7de121..c2ce59b 100644
--- a/src/active_bo_ros/active_bo_ros/active_rl_service.py
+++ b/src/active_bo_ros/active_bo_ros/active_rl_service.py
@@ -10,6 +10,7 @@ from rclpy.callback_groups import ReentrantCallbackGroup
 
 from active_bo_ros.ReinforcementLearning.ContinuousMountainCar import Continuous_MountainCarEnv
 import numpy as np
+import time
 
 
 class ActiveRLService(Node):
@@ -33,13 +34,18 @@ class ActiveRLService(Node):
                                                  callback_group=sub_callback_group)
         self.eval_response_received = False
         self.eval_response = None
+        self.eval_response_received_first = False
 
         self.env = Continuous_MountainCarEnv(render_mode='rgb_array')
         self.distance_penalty = 0
 
     def active_rl_eval_callback(self, response):
-        self.eval_response = response
-        self.eval_response_received = True
+        if not self.eval_response_received_first:
+            self.eval_response_received_first = True
+            self.get_logger().info('/active_rl_eval_response connected!')
+        else:
+            self.eval_response = response
+            self.eval_response_received = True
 
 
     def active_rl_callback(self, request, response):
@@ -86,7 +92,7 @@ class ActiveRLService(Node):
             self.eval_pub.publish(eval_request)
 
             while not self.eval_response_received:
-                rclpy.spin_once(self)
+                time.sleep(0.1)
 
             self.get_logger().info('Topic responded!')
             new_policy = self.eval_response.policy