diff --git a/requirements.txt b/requirements.txt
index e69de29..7cf115b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -0,0 +1,8 @@
+pytest~=6.2.5
+setuptools==58.2.0
+numpy~=1.26.4
+pydot~=1.4.2
+empy~=3.3.4
+lark~=1.1.1
+scipy~=1.12.0
+scikit-learn~=1.4.0
\ No newline at end of file
diff --git a/src/InteractionQuery/InteractionQuery/query_node.py b/src/InteractionQuery/InteractionQuery/query_node.py
deleted file mode 100644
index b18cde9..0000000
--- a/src/InteractionQuery/InteractionQuery/query_node.py
+++ /dev/null
@@ -1,4 +0,0 @@
-import rclpy
-from rclpy.node import Node
-
-from interaction_msgs.srv import Query
\ No newline at end of file
diff --git a/src/InteractionQuery/InteractionQuery/regular.py b/src/InteractionQuery/InteractionQuery/regular.py
deleted file mode 100644
index 7349859..0000000
--- a/src/InteractionQuery/InteractionQuery/regular.py
+++ /dev/null
@@ -1,12 +0,0 @@
-class RegularQuery:
- def __init__(self, regular, episode):
- self.regular = int(regular)
- self.counter = episode
-
- def query(self):
-
- if self.counter % self.regular == 0 and self.counter != 0:
- return True
-
- else:
- return False
diff --git a/src/InteractionQuery/setup.cfg b/src/InteractionQuery/setup.cfg
deleted file mode 100644
index 1c40041..0000000
--- a/src/InteractionQuery/setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[develop]
-script_dir=$base/lib/InteractionQuery
-[install]
-install_scripts=$base/lib/InteractionQuery
diff --git a/src/ObjectiveFunctions/package.xml b/src/ObjectiveFunctions/package.xml
deleted file mode 100644
index 9806cd4..0000000
--- a/src/ObjectiveFunctions/package.xml
+++ /dev/null
@@ -1,18 +0,0 @@
-<?xml version="1.0"?>
-<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
-<package format="3">
-  <name>ObjectiveFunctions</name>
-  <version>0.0.0</version>
-  <description>TODO: Package description</description>
-  <maintainer email="nikolaus.feith@unileoben.ac.at">niko</maintainer>
-  <license>TODO: License declaration</license>
-
-  <test_depend>ament_copyright</test_depend>
-  <test_depend>ament_flake8</test_depend>
-  <test_depend>ament_pep257</test_depend>
-  <test_depend>python3-pytest</test_depend>
-
-  <export>
-    <build_type>ament_python</build_type>
-  </export>
-</package>
diff --git a/src/ObjectiveFunctions/setup.cfg b/src/ObjectiveFunctions/setup.cfg
deleted file mode 100644
index 2c66535..0000000
--- a/src/ObjectiveFunctions/setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[develop]
-script_dir=$base/lib/ObjectiveFunctions
-[install]
-install_scripts=$base/lib/ObjectiveFunctions
diff --git a/src/Optimizers/setup.cfg b/src/Optimizers/setup.cfg
deleted file mode 100644
index 588328d..0000000
--- a/src/Optimizers/setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[develop]
-script_dir=$base/lib/Optimizers
-[install]
-install_scripts=$base/lib/Optimizers
diff --git a/src/RepresentationModels/resource/RepresentationModels b/src/RepresentationModels/resource/RepresentationModels
deleted file mode 100644
index e69de29..0000000
diff --git a/src/RepresentationModels/setup.cfg b/src/RepresentationModels/setup.cfg
deleted file mode 100644
index 4b0d329..0000000
--- a/src/RepresentationModels/setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[develop]
-script_dir=$base/lib/RepresentationModels
-[install]
-install_scripts=$base/lib/RepresentationModels
diff --git a/src/RepresentationModels/setup.py b/src/RepresentationModels/setup.py
deleted file mode 100644
index a2bbff0..0000000
--- a/src/RepresentationModels/setup.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from setuptools import find_packages, setup
-
-package_name = 'RepresentationModels'
-
-setup(
- name=package_name,
- version='0.0.0',
- packages=find_packages(exclude=['test']),
- data_files=[
- ('share/ament_index/resource_index/packages',
- ['resource/' + package_name]),
- ('share/' + package_name, ['package.xml']),
- ],
- install_requires=['setuptools'],
- zip_safe=True,
- maintainer='niko',
- maintainer_email='nikolaus.feith@unileoben.ac.at',
- description='TODO: Package description',
- license='TODO: License declaration',
- tests_require=['pytest'],
- entry_points={
- 'console_scripts': [
- ],
- },
-)
diff --git a/src/RepresentationModels/test/test_copyright.py b/src/RepresentationModels/test/test_copyright.py
deleted file mode 100644
index 97a3919..0000000
--- a/src/RepresentationModels/test/test_copyright.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2015 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ament_copyright.main import main
-import pytest
-
-
-# Remove the `skip` decorator once the source file(s) have a copyright header
-@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
-@pytest.mark.copyright
-@pytest.mark.linter
-def test_copyright():
- rc = main(argv=['.', 'test'])
- assert rc == 0, 'Found errors'
diff --git a/src/RepresentationModels/test/test_flake8.py b/src/RepresentationModels/test/test_flake8.py
deleted file mode 100644
index 27ee107..0000000
--- a/src/RepresentationModels/test/test_flake8.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2017 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ament_flake8.main import main_with_errors
-import pytest
-
-
-@pytest.mark.flake8
-@pytest.mark.linter
-def test_flake8():
- rc, errors = main_with_errors(argv=[])
- assert rc == 0, \
- 'Found %d code style errors / warnings:\n' % len(errors) + \
- '\n'.join(errors)
diff --git a/src/RepresentationModels/test/test_pep257.py b/src/RepresentationModels/test/test_pep257.py
deleted file mode 100644
index b234a38..0000000
--- a/src/RepresentationModels/test/test_pep257.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2015 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ament_pep257.main import main
-import pytest
-
-
-@pytest.mark.linter
-@pytest.mark.pep257
-def test_pep257():
- rc = main(argv=['.', 'test'])
- assert rc == 0, 'Found code style errors / warnings'
diff --git a/src/interaction_msgs/srv/Query.srv b/src/interaction_msgs/srv/Query.srv
index a543115..2d2ccc2 100644
--- a/src/interaction_msgs/srv/Query.srv
+++ b/src/interaction_msgs/srv/Query.srv
@@ -1,3 +1,6 @@
+# MODES: random:=0, regular:=1, improvement:=2
+uint16 modes
+
# random query
float32 threshold
@@ -7,9 +10,9 @@ uint16 current_episode
# improvement query
# float32 threshold
-uint16 period
+# uint16 frequency
uint16 last_queried_episode
-float32[] rewards
+float32[] last_rewards
---
bool interaction
\ No newline at end of file
diff --git a/src/InteractionQuery/InteractionQuery/__init__.py b/src/interaction_objective_function/interaction_objective_function/__init__.py
similarity index 100%
rename from src/InteractionQuery/InteractionQuery/__init__.py
rename to src/interaction_objective_function/interaction_objective_function/__init__.py
diff --git a/src/RepresentationModels/package.xml b/src/interaction_objective_function/package.xml
similarity index 93%
rename from src/RepresentationModels/package.xml
rename to src/interaction_objective_function/package.xml
index 0a5f94f..122c14e 100644
--- a/src/RepresentationModels/package.xml
+++ b/src/interaction_objective_function/package.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0"?>
 <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
 <package format="3">
-  <name>RepresentationModels</name>
+  <name>interaction_objective_function</name>
   <version>0.0.0</version>
   <description>TODO: Package description</description>
   <maintainer email="nikolaus.feith@unileoben.ac.at">niko</maintainer>
diff --git a/src/InteractionQuery/resource/InteractionQuery b/src/interaction_objective_function/resource/interaction_objective_function
similarity index 100%
rename from src/InteractionQuery/resource/InteractionQuery
rename to src/interaction_objective_function/resource/interaction_objective_function
diff --git a/src/interaction_objective_function/setup.cfg b/src/interaction_objective_function/setup.cfg
new file mode 100644
index 0000000..e6a1bf3
--- /dev/null
+++ b/src/interaction_objective_function/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/interaction_objective_function
+[install]
+install_scripts=$base/lib/interaction_objective_function
diff --git a/src/Optimizers/setup.py b/src/interaction_objective_function/setup.py
similarity index 92%
rename from src/Optimizers/setup.py
rename to src/interaction_objective_function/setup.py
index a038bdf..27798b8 100644
--- a/src/Optimizers/setup.py
+++ b/src/interaction_objective_function/setup.py
@@ -1,6 +1,6 @@
from setuptools import find_packages, setup
-package_name = 'Optimizers'
+package_name = 'interaction_objective_function'
setup(
name=package_name,
diff --git a/src/InteractionQuery/test/test_copyright.py b/src/interaction_objective_function/test/test_copyright.py
similarity index 100%
rename from src/InteractionQuery/test/test_copyright.py
rename to src/interaction_objective_function/test/test_copyright.py
diff --git a/src/InteractionQuery/test/test_flake8.py b/src/interaction_objective_function/test/test_flake8.py
similarity index 100%
rename from src/InteractionQuery/test/test_flake8.py
rename to src/interaction_objective_function/test/test_flake8.py
diff --git a/src/InteractionQuery/test/test_pep257.py b/src/interaction_objective_function/test/test_pep257.py
similarity index 100%
rename from src/InteractionQuery/test/test_pep257.py
rename to src/interaction_objective_function/test/test_pep257.py
diff --git a/src/ObjectiveFunctions/ObjectiveFunctions/__init__.py b/src/interaction_optimizers/interaction_optimizers/__init__.py
similarity index 100%
rename from src/ObjectiveFunctions/ObjectiveFunctions/__init__.py
rename to src/interaction_optimizers/interaction_optimizers/__init__.py
diff --git a/src/interaction_optimizers/interaction_optimizers/acquisition_function/__init__.py b/src/interaction_optimizers/interaction_optimizers/acquisition_function/__init__.py
new file mode 100644
index 0000000..fa31491
--- /dev/null
+++ b/src/interaction_optimizers/interaction_optimizers/acquisition_function/__init__.py
@@ -0,0 +1,4 @@
+from .confidence_bounds import ConfidenceBounds
+from .probability_of_improvement import ProbabilityOfImprovement
+from .expected_improvement import ExpectedImprovement
+from .preference_expected_improvement import PreferenceExpectedImprovement
diff --git a/src/interaction_optimizers/interaction_optimizers/acquisition_function/confidence_bounds.py b/src/interaction_optimizers/interaction_optimizers/acquisition_function/confidence_bounds.py
new file mode 100644
index 0000000..2c5c232
--- /dev/null
+++ b/src/interaction_optimizers/interaction_optimizers/acquisition_function/confidence_bounds.py
@@ -0,0 +1,31 @@
+
+import numpy as np
+
+
+class ConfidenceBounds:
+ def __init__(self, nr_weights, nr_samples=100, beta=1.2, seed=None, lower_bound=-1.0, upper_bound=1.0):
+ self.nr_weights = nr_weights
+ self.nr_samples = nr_samples
+ self.beta = beta # if beta negative => lower confidence bounds
+ self.lower_bound = lower_bound
+ self.upper_bound = upper_bound
+ self.seed = seed
+
+ def __call__(self, gauss_process, _, seed=None):
+ # if seed is set for whole experiment
+ if self.seed is not None:
+ seed = self.seed
+
+ # random generator
+ rng = np.random.default_rng(seed)
+
+ # sample from the surrogate
+ x_test = rng.uniform(self.lower_bound, self.upper_bound, size=(self.nr_samples, self.nr_weights))
+ mu, sigma = gauss_process.predict(x_test, return_std=True)
+
+ # upper/lower confidence bounds
+ cb = mu + self.beta * sigma
+
+ # get the best result and return it
+ idx = np.argmax(cb)
+ return x_test[idx, :]
diff --git a/src/interaction_optimizers/interaction_optimizers/acquisition_function/expected_improvement.py b/src/interaction_optimizers/interaction_optimizers/acquisition_function/expected_improvement.py
new file mode 100644
index 0000000..8bfb579
--- /dev/null
+++ b/src/interaction_optimizers/interaction_optimizers/acquisition_function/expected_improvement.py
@@ -0,0 +1,37 @@
+
+import numpy as np
+from scipy.stats import norm
+
+
+class ExpectedImprovement:
+ def __init__(self, nr_weights, nr_samples=100, kappa=0.0, seed=None, lower_bound=-1.0, upper_bound=1.0):
+ self.nr_weights = nr_weights
+ self.nr_samples = nr_samples
+ self.kappa = kappa
+ self.lower_bound = lower_bound
+ self.upper_bound = upper_bound
+ self.seed = seed
+
+ def __call__(self, gauss_process, x_observed, seed=None):
+ # if seed is set for whole experiment
+ if self.seed is not None:
+ seed = self.seed
+
+ # random generator
+ rng = np.random.default_rng(seed)
+
+ # get the best so far observed y
+ mu = gauss_process.predict(x_observed)
+ y_best = max(mu)
+
+ # sample from surrogate
+ x_test = rng.uniform(self.lower_bound, self.upper_bound, size=(self.nr_samples, self.nr_weights))
+ mu, sigma = gauss_process.predict(x_test, return_std=True)
+
+ # expected improvement
+ z = (mu - y_best - self.kappa) / sigma
+ ei = (mu - y_best - self.kappa) * norm.cdf(z) + sigma * norm.pdf(z)
+
+ # get the best result and return it
+ idx = np.argmax(ei)
+ return x_test[idx, :]
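+
+
+# Minimal usage sketch (illustration only, not wired into any node): fit a surrogate on a
+# handful of random observations and ask the acquisition function for the next candidate.
+# The quadratic toy reward below is an assumption made purely for this example.
+if __name__ == '__main__':
+    from sklearn.gaussian_process import GaussianProcessRegressor
+
+    rng_demo = np.random.default_rng(0)
+    x_obs = rng_demo.uniform(-1.0, 1.0, size=(5, 3))   # 5 observations over 3 weights
+    y_obs = -np.sum(x_obs ** 2, axis=1)                # toy reward with its optimum at the origin
+
+    gp = GaussianProcessRegressor().fit(x_obs, y_obs)
+    acquisition = ExpectedImprovement(nr_weights=3, nr_samples=200, seed=0)
+    print('next candidate:', acquisition(gp, x_obs))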
diff --git a/src/interaction_optimizers/interaction_optimizers/acquisition_function/preference_expected_improvement.py b/src/interaction_optimizers/interaction_optimizers/acquisition_function/preference_expected_improvement.py
new file mode 100644
index 0000000..bffcef3
--- /dev/null
+++ b/src/interaction_optimizers/interaction_optimizers/acquisition_function/preference_expected_improvement.py
@@ -0,0 +1,93 @@
+
+import numpy as np
+from scipy.stats import norm
+
+
+class PreferenceExpectedImprovement:
+ def __init__(self, nr_dims, initial_variance, update_variance, nr_samples=100,
+ kappa=0.0, lower_bound=None, upper_bound=None, seed=None, fixed_dims=None):
+ self.nr_dims = nr_dims
+
+ self.initial_variance = initial_variance
+ self.update_variance = update_variance
+
+ self.nr_samples = nr_samples
+ self.kappa = kappa
+
+ if lower_bound is None:
+ self.lower_bound = [-1.] * self.nr_dims
+ else:
+ self.lower_bound = lower_bound
+
+ if upper_bound is None:
+ self.upper_bound = [1.] * self.nr_dims
+ else:
+ self.upper_bound = upper_bound
+
+ self.seed = seed
+
+ # initial proposal distribution
+ self.proposal_mean = np.zeros((nr_dims, 1))
+ self.proposal_cov = np.diag(np.ones((nr_dims,)) * self.initial_variance)
+
+ # fixed dimension for robot experiment
+ self.fixed_dims = fixed_dims
+
+ def rejection_sampling(self, seed=None):
+ rng = np.random.default_rng(seed)
+
+ samples = np.empty((0, self.nr_dims))
+ while samples.shape[0] < self.nr_samples:
+            # sample from the multivariate Gaussian proposal distribution
+            sample = np.zeros((1, self.nr_dims))
+            for i in range(self.nr_dims):
+                if self.fixed_dims is not None and i in self.fixed_dims:
+                    sample[0, i] = self.fixed_dims[i]
+ else:
+ check = False
+ while not check:
+                        # rng.normal expects a standard deviation, hence the square root of the stored variance
+                        sample[0, i] = rng.normal(self.proposal_mean[i, 0], np.sqrt(self.proposal_cov[i, i]))
+ if self.lower_bound[i] <= sample[0, i] <= self.upper_bound[i]:
+ check = True
+
+ samples = np.append(samples, sample, axis=0)
+
+ return samples
+
+ def __call__(self, gauss_process, x_observed, seed=None):
+ # if seed is set for whole experiment
+ if self.seed is not None:
+ seed = self.seed
+
+ # get the best so far observed y
+ mu = gauss_process.predict(x_observed)
+ y_best = max(mu)
+
+ # sample from surrogate
+ x_test = self.rejection_sampling(seed)
+ mu, sigma = gauss_process.predict(x_test, return_std=True)
+
+ # expected improvement
+ z = (mu - y_best - self.kappa) / sigma
+ ei = (mu - y_best - self.kappa) * norm.cdf(z) + sigma * norm.pdf(z)
+
+ # get the best result and return it
+ idx = np.argmax(ei)
+ return x_test[idx, :]
+
+ def update_proposal_model(self, preference_mean, preference_bool):
+ cov_diag = np.ones((self.nr_dims,)) * self.initial_variance
+ cov_diag[preference_bool] = self.update_variance
+
+ preference_cov = np.diag(cov_diag)
+
+ preference_mean = preference_mean.reshape(-1, 1)
+
+ posterior_mean = np.linalg.inv(np.linalg.inv(self.proposal_cov) + np.linalg.inv(preference_cov))\
+ .dot(np.linalg.inv(self.proposal_cov).dot(self.proposal_mean)
+ + np.linalg.inv(preference_cov).dot(preference_mean))
+
+ posterior_cov = np.linalg.inv(np.linalg.inv(self.proposal_cov) + np.linalg.inv(preference_cov))
+
+ self.proposal_mean = posterior_mean
+ self.proposal_cov = posterior_cov
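+
+
+# Minimal usage sketch (illustration only): bias the proposal distribution towards a user
+# preference on the first two of three dimensions, then draw candidates from it.
+# The numbers below are assumptions made purely for this example.
+if __name__ == '__main__':
+    pei = PreferenceExpectedImprovement(nr_dims=3, initial_variance=5.0,
+                                        update_variance=0.05, nr_samples=50,
+                                        fixed_dims={})                      # no dimension is fixed here
+    preferred = np.array([0.5, -0.2, 0.0])
+    pei.update_proposal_model(preferred, np.array([True, True, False]))
+    samples = pei.rejection_sampling(seed=0)
+    print(samples.mean(axis=0))   # the first two dimensions should sit close to 0.5 and -0.2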
diff --git a/src/interaction_optimizers/interaction_optimizers/acquisition_function/probability_of_improvement.py b/src/interaction_optimizers/interaction_optimizers/acquisition_function/probability_of_improvement.py
new file mode 100644
index 0000000..d4ccb2e
--- /dev/null
+++ b/src/interaction_optimizers/interaction_optimizers/acquisition_function/probability_of_improvement.py
@@ -0,0 +1,37 @@
+
+import numpy as np
+from scipy.stats import norm
+
+
+class ProbabilityOfImprovement:
+ def __init__(self, nr_weights, nr_samples=100, kappa=0.0, seed=None, lower_bound=-1.0, upper_bound=1.0):
+ self.nr_weights = nr_weights
+ self.nr_samples = nr_samples
+ self.kappa = kappa
+ self.lower_bound = lower_bound
+ self.upper_bound = upper_bound
+ self.seed = seed
+
+ def __call__(self, gauss_process, x_observed, seed=None):
+ # if seed is set for whole experiment
+ if self.seed is not None:
+ seed = self.seed
+
+ # random generator
+ rng = np.random.default_rng(seed)
+
+ # get the best so far observed y
+ mu = gauss_process.predict(x_observed)
+ y_best = max(mu)
+
+ # sample from surrogate
+ x_test = rng.uniform(self.lower_bound, self.upper_bound, size=(self.nr_samples, self.nr_weights))
+ mu, sigma = gauss_process.predict(x_test, return_std=True)
+
+ # probability of improvement
+ z = (mu - y_best - self.kappa) / sigma
+ pi = norm.cdf(z)
+
+ # get the best result and return it
+ idx = np.argmax(pi)
+ return x_test[idx, :]
diff --git a/src/interaction_optimizers/interaction_optimizers/bayesian_optimization_node.py b/src/interaction_optimizers/interaction_optimizers/bayesian_optimization_node.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/src/interaction_optimizers/interaction_optimizers/bayesian_optimization_node.py
@@ -0,0 +1 @@
+
diff --git a/src/Optimizers/Optimizers/__init__.py b/src/interaction_optimizers/interaction_optimizers/optimizers/__init__.py
similarity index 100%
rename from src/Optimizers/Optimizers/__init__.py
rename to src/interaction_optimizers/interaction_optimizers/optimizers/__init__.py
diff --git a/src/interaction_optimizers/interaction_optimizers/optimizers/bayesian_optimization.py b/src/interaction_optimizers/interaction_optimizers/optimizers/bayesian_optimization.py
new file mode 100644
index 0000000..b58e90d
--- /dev/null
+++ b/src/interaction_optimizers/interaction_optimizers/optimizers/bayesian_optimization.py
@@ -0,0 +1,137 @@
+
+import numpy as np
+from sklearn.gaussian_process import GaussianProcessRegressor
+from sklearn.gaussian_process.kernels import Matern, RBF, ExpSineSquared
+
+from ..acquisition_function import ConfidenceBounds
+from ..acquisition_function import ProbabilityOfImprovement
+from ..acquisition_function import ExpectedImprovement
+from ..acquisition_function import PreferenceExpectedImprovement
+
+from sklearn.exceptions import ConvergenceWarning
+import warnings
+
+warnings.filterwarnings('ignore', category=ConvergenceWarning)
+
+
+class BayesianOptimization:
+ def __init__(self, nr_steps, nr_dimensions, nr_policy_parameters, seed=None,
+ fixed_dimensions=None, lower_bound=None, upper_bound=None,
+ acquisition_function_name="EI", kernel_name="Matern",
+ **kwargs):
+
+ self.nr_steps = nr_steps
+ self.nr_dimensions = nr_dimensions
+ self.nr_policy_parameters = nr_policy_parameters
+ self.nr_weights = nr_policy_parameters * nr_dimensions
+
+ if lower_bound is None:
+ self.lower_bound = [-1.] * self.nr_weights
+ else:
+ self.lower_bound = lower_bound
+
+ if upper_bound is None:
+            self.upper_bound = [1.] * self.nr_weights
+ else:
+ self.upper_bound = upper_bound
+
+ self.seed = seed
+ self.fixed_dimensions = fixed_dimensions
+
+ self.x_observed = None
+ self.y_observed = None
+ self.best_reward = None
+ self.episode = 0
+
+ self.gauss_process = None
+ self.n_restarts_optimizer = kwargs.get('n_restarts_optimizer', 5)
+
+ # region Kernel
+ length_scale = kwargs.get('length_scale', 1.0)
+
+ if kernel_name == "Matern":
+ nu = kwargs.get('nu', 1.5)
+ self.kernel = Matern(nu=nu, length_scale=length_scale)
+
+ elif kernel_name == "RBF":
+ self.kernel = RBF(length_scale=length_scale)
+
+ elif kernel_name == "ExpSineSquared":
+ periodicity = kwargs.get('periodicity', 1.0)
+ self.kernel = ExpSineSquared(length_scale=length_scale, periodicity=periodicity)
+
+ else:
+ raise NotImplementedError("This kernel is not implemented!")
+ # endregion
+
+        # region Acquisition functions
+        nr_samples = kwargs.get('nr_samples', 100)
+
+ if acquisition_function_name == "CB":
+ beta = kwargs.get('beta', 1.2)
+            self.acquisition_function = ConfidenceBounds(self.nr_weights, nr_samples=nr_samples, beta=beta, seed=seed,
+                                                         lower_bound=self.lower_bound, upper_bound=self.upper_bound)
+
+ elif acquisition_function_name == "PI":
+ kappa = kwargs.get('kappa', 0.0)
+            self.acquisition_function = ProbabilityOfImprovement(self.nr_weights, nr_samples=nr_samples, kappa=kappa,
+                                                                 seed=seed, lower_bound=self.lower_bound,
+                                                                 upper_bound=self.upper_bound)
+ elif acquisition_function_name == "EI":
+ kappa = kwargs.get('kappa', 0.0)
+            self.acquisition_function = ExpectedImprovement(self.nr_weights, nr_samples=nr_samples, kappa=kappa,
+                                                            seed=seed, lower_bound=self.lower_bound,
+                                                            upper_bound=self.upper_bound)
+ elif acquisition_function_name == "PEI":
+ kappa = kwargs.get('kappa', 0.0)
+
+ initial_variance = kwargs.get('initial_variance', None)
+ update_variance = kwargs.get('update_variance', None)
+
+ if initial_variance is None or update_variance is None:
+                raise ValueError("initial_variance and update_variance have to be provided for PEI!")
+
+            self.acquisition_function = PreferenceExpectedImprovement(self.nr_weights, initial_variance,
+                                                                      update_variance, nr_samples=nr_samples,
+                                                                      kappa=kappa, lower_bound=self.lower_bound,
+                                                                      upper_bound=self.upper_bound, seed=seed,
+                                                                      fixed_dims=fixed_dimensions)
+ else:
+ raise NotImplementedError("This acquisition function is not implemented!")
+ # endregion
+
+ self.reset()
+
+ def reset(self):
+ self.gauss_process = GaussianProcessRegressor(self.kernel, n_restarts_optimizer=self.n_restarts_optimizer)
+ self.best_reward = np.empty((1, 1))
+ self.x_observed = np.zeros((1, self.nr_weights), dtype=np.float64)
+ self.y_observed = np.zeros((1, 1), dtype=np.float64)
+ self.episode = 0
+
+ def next_observation(self):
+ x_next = self.acquisition_function(self.gauss_process, self.x_observed, seed=self.seed)
+ return x_next
+
+ def add_observation(self, y_new, x_new):
+ if self.episode == 0:
+ self.x_observed[0, :] = x_new
+ self.y_observed[0] = y_new
+ self.best_reward[0] = np.max(self.y_observed)
+ else:
+ self.x_observed = np.vstack((self.x_observed, np.around(x_new, decimals=8)))
+ self.y_observed = np.vstack((self.y_observed, y_new))
+ self.best_reward = np.vstack((self.best_reward, np.max(self.y_observed)))
+
+ self.gauss_process.fit(self.x_observed, self.y_observed)
+ self.episode += 1
+
+ def get_best_result(self):
+ y_max = np.max(self.y_observed)
+ idx = np.argmax(self.y_observed)
+ x_max = self.x_observed[idx, :]
+ return y_max, x_max, idx
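+
+
+# Minimal end-to-end sketch (illustration only, not wired into any ROS node; run as a module
+# so the package-relative imports resolve): maximise a toy two-weight reward with the default
+# Matern kernel and EI acquisition. The objective below is an assumption for this example.
+if __name__ == '__main__':
+    bo = BayesianOptimization(nr_steps=10, nr_dimensions=1, nr_policy_parameters=2,
+                              lower_bound=[-1.0, -1.0], upper_bound=[1.0, 1.0])
+    for _ in range(bo.nr_steps):
+        x_next = bo.next_observation()
+        reward = -np.sum((x_next - 0.3) ** 2)   # toy objective with its optimum at [0.3, 0.3]
+        bo.add_observation(reward, x_next)
+    print(bo.get_best_result())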
diff --git a/src/Optimizers/package.xml b/src/interaction_optimizers/package.xml
similarity index 94%
rename from src/Optimizers/package.xml
rename to src/interaction_optimizers/package.xml
index 9ed5d14..404340c 100644
--- a/src/Optimizers/package.xml
+++ b/src/interaction_optimizers/package.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0"?>
 <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
 <package format="3">
-  <name>Optimizers</name>
+  <name>interaction_optimizers</name>
   <version>0.0.0</version>
   <description>TODO: Package description</description>
   <maintainer email="nikolaus.feith@unileoben.ac.at">niko</maintainer>
diff --git a/src/ObjectiveFunctions/resource/ObjectiveFunctions b/src/interaction_optimizers/resource/interaction_optimizers
similarity index 100%
rename from src/ObjectiveFunctions/resource/ObjectiveFunctions
rename to src/interaction_optimizers/resource/interaction_optimizers
diff --git a/src/interaction_optimizers/setup.cfg b/src/interaction_optimizers/setup.cfg
new file mode 100644
index 0000000..397420e
--- /dev/null
+++ b/src/interaction_optimizers/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/interaction_optimizers
+[install]
+install_scripts=$base/lib/interaction_optimizers
diff --git a/src/ObjectiveFunctions/setup.py b/src/interaction_optimizers/setup.py
similarity index 94%
rename from src/ObjectiveFunctions/setup.py
rename to src/interaction_optimizers/setup.py
index c232c91..0d735f4 100644
--- a/src/ObjectiveFunctions/setup.py
+++ b/src/interaction_optimizers/setup.py
@@ -1,6 +1,6 @@
from setuptools import find_packages, setup
-package_name = 'ObjectiveFunctions'
+package_name = 'interaction_optimizers'
setup(
name=package_name,
diff --git a/src/ObjectiveFunctions/test/test_copyright.py b/src/interaction_optimizers/test/test_copyright.py
similarity index 100%
rename from src/ObjectiveFunctions/test/test_copyright.py
rename to src/interaction_optimizers/test/test_copyright.py
diff --git a/src/ObjectiveFunctions/test/test_flake8.py b/src/interaction_optimizers/test/test_flake8.py
similarity index 100%
rename from src/ObjectiveFunctions/test/test_flake8.py
rename to src/interaction_optimizers/test/test_flake8.py
diff --git a/src/ObjectiveFunctions/test/test_pep257.py b/src/interaction_optimizers/test/test_pep257.py
similarity index 100%
rename from src/ObjectiveFunctions/test/test_pep257.py
rename to src/interaction_optimizers/test/test_pep257.py
diff --git a/src/RepresentationModels/RepresentationModels/__init__.py b/src/interaction_query/interaction_query/__init__.py
similarity index 100%
rename from src/RepresentationModels/RepresentationModels/__init__.py
rename to src/interaction_query/interaction_query/__init__.py
diff --git a/src/InteractionQuery/InteractionQuery/improvement.py b/src/interaction_query/interaction_query/improvement_query.py
similarity index 99%
rename from src/InteractionQuery/InteractionQuery/improvement.py
rename to src/interaction_query/interaction_query/improvement_query.py
index 416a832..014e7a1 100644
--- a/src/InteractionQuery/InteractionQuery/improvement.py
+++ b/src/interaction_query/interaction_query/improvement_query.py
@@ -1,3 +1,4 @@
+
class ImprovementQuery:
def __init__(self, threshold, period, last_query, rewards):
self.threshold = threshold
diff --git a/src/interaction_query/interaction_query/query_node.py b/src/interaction_query/interaction_query/query_node.py
new file mode 100644
index 0000000..13a1b25
--- /dev/null
+++ b/src/interaction_query/interaction_query/query_node.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+import rclpy
+from rclpy.node import Node
+
+from .random_query import RandomQuery
+from .regular_query import RegularQuery
+from .improvement_query import ImprovementQuery
+
+from interaction_msgs.srv import Query
+
+
+class QueryNode(Node):
+ def __init__(self):
+ super().__init__('query_node')
+ self.query_service = self.create_service(Query, 'user_query', self.query_callback)
+
+ self.get_logger().info('Query node started!')
+
+ def check_random_request(self, req):
+ t = req.threshold
+ if 0 < t <= 1:
+ return True
+ else:
+ self.get_logger().error('Invalid random request in user query!')
+
+ def check_regular_request(self, req):
+ f = req.frequency
+ if f > 0:
+ return True
+ else:
+ self.get_logger().error('Invalid regular request in user query!')
+
+ def check_improvement_request(self, req):
+ t = req.threshold
+ f = req.frequency
+ last_rewards = req.last_rewards
+        # float32[] fields arrive as a sequence (not a Python list), so only check that rewards were provided
+        if 0 < t <= 1 and f > 0 and len(last_rewards) > 0:
+ return True
+ else:
+ self.get_logger().error('Invalid improvement request in user query!')
+
+ def query_callback(self, request, response):
+        mode = request.modes
+ query_obj = None
+ if mode == 0:
+ if self.check_random_request(request):
+ query_obj = RandomQuery(request.threshold)
+ elif mode == 1:
+ if self.check_regular_request(request):
+ query_obj = RegularQuery(request.frequency, request.current_episode)
+ elif mode == 2:
+ if self.check_improvement_request(request):
+ query_obj = ImprovementQuery(request.threshold, request.frequency,
+ request.last_queried_episode, request.last_rewards)
+ else:
+ self.get_logger().error('Invalid query mode!')
+
+ if query_obj is not None:
+ response.interaction = query_obj.query()
+ return response
+
+
+def main(args=None):
+ rclpy.init(args=args)
+ node = QueryNode()
+ rclpy.spin(node)
+ rclpy.shutdown()
+
+
+if __name__ == '__main__':
+ main()
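+
+# Rough usage sketch (assumes the workspace is built and sourced; field names follow the
+# interaction_msgs/srv/Query definition modified in this change set):
+#   ros2 run interaction_query query_n
+#   ros2 service call /user_query interaction_msgs/srv/Query "{modes: 0, threshold: 0.3}"
+# An `interaction: true` response means the optimizer should query the user this episode.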
diff --git a/src/InteractionQuery/InteractionQuery/random.py b/src/interaction_query/interaction_query/random_query.py
similarity index 100%
rename from src/InteractionQuery/InteractionQuery/random.py
rename to src/interaction_query/interaction_query/random_query.py
diff --git a/src/interaction_query/interaction_query/regular_query.py b/src/interaction_query/interaction_query/regular_query.py
new file mode 100644
index 0000000..80c380d
--- /dev/null
+++ b/src/interaction_query/interaction_query/regular_query.py
@@ -0,0 +1,12 @@
+class RegularQuery:
+ def __init__(self, frequency, episode):
+ self.frequency = int(frequency)
+ self.counter = episode
+
+    def query(self):
+        return self.counter % self.frequency == 0 and self.counter != 0
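+
+
+# Quick illustration of the intended behaviour (not executed anywhere):
+#   RegularQuery(5, 10).query()  ->  True   (episode 10 is a multiple of 5)
+#   RegularQuery(5, 7).query()   ->  False
+#   RegularQuery(5, 0).query()   ->  False  (episode 0 never triggers a query)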
diff --git a/src/InteractionQuery/package.xml b/src/interaction_query/package.xml
similarity index 83%
rename from src/InteractionQuery/package.xml
rename to src/interaction_query/package.xml
index 0e664ec..bdffdd2 100644
--- a/src/InteractionQuery/package.xml
+++ b/src/interaction_query/package.xml
@@ -1,12 +1,15 @@
 <?xml version="1.0"?>
 <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
 <package format="3">
-  <name>InteractionQuery</name>
+  <name>interaction_query</name>
   <version>0.0.0</version>
   <description>TODO: Package description</description>
   <maintainer>root</maintainer>
   <license>TODO: License declaration</license>
 
+  <depend>interaction_msgs</depend>
+  <depend>rclpy</depend>
+
   <test_depend>ament_copyright</test_depend>
   <test_depend>ament_flake8</test_depend>
   <test_depend>ament_pep257</test_depend>
diff --git a/src/Optimizers/resource/Optimizers b/src/interaction_query/resource/interaction_query
similarity index 100%
rename from src/Optimizers/resource/Optimizers
rename to src/interaction_query/resource/interaction_query
diff --git a/src/interaction_query/setup.cfg b/src/interaction_query/setup.cfg
new file mode 100644
index 0000000..a593405
--- /dev/null
+++ b/src/interaction_query/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/interaction_query
+[install]
+install_scripts=$base/lib/interaction_query
diff --git a/src/InteractionQuery/setup.py b/src/interaction_query/setup.py
similarity index 86%
rename from src/InteractionQuery/setup.py
rename to src/interaction_query/setup.py
index 74212fe..06c55ac 100644
--- a/src/InteractionQuery/setup.py
+++ b/src/interaction_query/setup.py
@@ -1,6 +1,6 @@
from setuptools import find_packages, setup
-package_name = 'InteractionQuery'
+package_name = 'interaction_query'
setup(
name=package_name,
@@ -20,6 +20,7 @@ setup(
tests_require=['pytest'],
entry_points={
'console_scripts': [
+ 'query_n = interaction_query.query_node:main',
],
},
)
diff --git a/src/Optimizers/test/test_copyright.py b/src/interaction_query/test/test_copyright.py
similarity index 100%
rename from src/Optimizers/test/test_copyright.py
rename to src/interaction_query/test/test_copyright.py
diff --git a/src/Optimizers/test/test_flake8.py b/src/interaction_query/test/test_flake8.py
similarity index 100%
rename from src/Optimizers/test/test_flake8.py
rename to src/interaction_query/test/test_flake8.py
diff --git a/src/Optimizers/test/test_pep257.py b/src/interaction_query/test/test_pep257.py
similarity index 100%
rename from src/Optimizers/test/test_pep257.py
rename to src/interaction_query/test/test_pep257.py