Compare commits
5 Commits
df79df9808...0c5a05b6c8
SHA1 | Author | Date
---|---|---
0c5a05b6c8 | |
f54fbb7606 | |
78eccba14b | |
1f4f878783 | |
cf400926dc | |
@@ -7,4 +7,8 @@
    <orderEntry type="jdk" jdkName="Python 3.8 (venv)" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PyDocumentationSettings">
    <option name="format" value="PLAIN" />
    <option name="myDocStringFormat" value="Plain" />
  </component>
</module>
14 AcquistionFunctions/ConfidenceBound.py Normal file
@@ -0,0 +1,14 @@
import numpy as np
from scipy.stats import norm


def ConfidenceBound(gp, X, nr_test, nr_weights, lam=1.2, seed=None, lower=-1.0, upper=1.0):
    y_hat = gp.predict(X)
    best_y = max(y_hat)
    rng = np.random.default_rng(seed=seed)
    X_test = rng.uniform(lower, upper, (nr_test, nr_weights))
    mu, sigma = gp.predict(X_test, return_std=True)
    cb = mu + lam * sigma

    idx = np.argmax(cb)
    X_next = X_test[idx, :]
    return X_next
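For orientation: this is an upper-confidence-bound style acquisition. It scores `nr_test` uniformly sampled candidate weight vectors by `mu + lam * sigma` and returns the highest-scoring one (the `y_hat`/`best_y` values computed at the top are not used further). A minimal usage sketch with a fitted scikit-learn GP; the toy data, kernel, and parameter values below are illustrative assumptions, not part of the commit:

```python
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern

from AcquistionFunctions.ConfidenceBound import ConfidenceBound

rng = np.random.default_rng(0)
X = rng.uniform(-1.0, 1.0, (5, 6))   # 5 already-evaluated weight vectors, 6 weights each
Y = np.sin(X).sum(axis=1)            # toy rewards, stand-in for episode returns

gp = GaussianProcessRegressor(kernel=Matern(nu=1.5), normalize_y=True)
gp.fit(X, Y)

# mu + lam * sigma: a larger lam weights the predictive uncertainty more (exploration)
x_next = ConfidenceBound(gp, X, nr_test=100, nr_weights=6, lam=1.2, seed=0)
print(x_next.shape)  # (6,)
```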
15 AcquistionFunctions/ProbabilityOfImprovement.py Normal file
@@ -0,0 +1,15 @@
import numpy as np
from scipy.stats import norm


def ProbabilityOfImprovement(gp, X, nr_test, nr_weights, kappa=2.576, seed=None, lower=-1.0, upper=1.0):
    y_hat = gp.predict(X)
    best_y = max(y_hat)
    rng = np.random.default_rng(seed=seed)
    X_test = rng.uniform(lower, upper, (nr_test, nr_weights))
    mu, sigma = gp.predict(X_test, return_std=True)
    z = (mu - best_y - kappa) / sigma
    pi = norm.cdf(z)

    idx = np.argmax(pi)
    X_next = X_test[idx, :]
    return X_next
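This is the classic probability-of-improvement acquisition, PI(x) = Φ((μ(x) − best_y − κ) / σ(x)), with `kappa` acting as the exploration margin (often written ξ) and `best_y` taken from the GP's predictions at the already-evaluated weights rather than from the observed rewards. A small numeric illustration of how κ shifts the score (added for orientation, not part of the commit; the `next_observation` hunk further down passes `kappa=0`):

```python
from scipy.stats import norm

# mu - best_y = 0.0, sigma = 1.0, kappa = 2.576 (the default): a candidate must beat
# best_y by roughly 2.6 predicted standard deviations before its score exceeds 0.5
print(norm.cdf((0.0 - 0.0 - 2.576) / 1.0))   # ~0.005
# kappa = 0 (what next_observation passes): any predicted improvement scores above 0.5
print(norm.cdf((0.1 - 0.0 - 0.0) / 1.0))     # ~0.54
```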
@@ -4,26 +4,29 @@ from sklearn.gaussian_process.kernels import Matern

from PolicyModel.GaussianModel import GaussianPolicy
from AcquistionFunctions.ExpectedImprovement import ExpectedImprovement
from AcquistionFunctions.ProbabilityOfImprovement import ProbabilityOfImprovement
from AcquistionFunctions.ConfidenceBound import ConfidenceBound

from ToyTask.MountainCarGym import Continuous_MountainCarEnv

import gym
import time

import matplotlib.pyplot as plt

from warnings import catch_warnings, simplefilter

class BayesianOptimization:
    def __init__(self, env, nr_step, nr_init=3, acq='ei', nr_weights=8, policy_seed=None):
    def __init__(self, env, nr_step, nr_init=3, acq='ei', nr_weights=6, policy_seed=None):
        self.env = env
        self.nr_init = nr_init
        self.acq = acq
        self.X = None
        self.Y = None
        self.counter_array = np.zeros((1, 1))
        self.gp = None

        self.episode = 0
        self.best_reward = np.empty((1, 1))
        self.distance_penalty = 10
        self.distance_penalty = 0

        self.nr_policy_weights = nr_weights
        self.nr_steps = nr_step
@@ -40,7 +43,8 @@ class BayesianOptimization:

    def initialize(self):
        self.env.reset()
        self.env.render()
        if self.env.render_mode == 'human':
            self.env.render()

        self.X = np.zeros((self.nr_init, self.nr_policy_weights))
        self.Y = np.zeros((self.nr_init, 1))
@@ -50,7 +54,7 @@ class BayesianOptimization:
            self.X[i, :] = self.policy_model.weights.T
            policy = self.policy_model.policy_rollout()

            reward = self.runner(policy)
            reward, step_count = self.runner(policy)

            self.Y[i] = reward

@@ -66,16 +70,29 @@ class BayesianOptimization:
            output = self.env.step(action)
            env_reward += output[1]
            done = output[2]
            time.sleep(0.0001)
            if self.env.render_mode == 'human':
                time.sleep(0.0001)
            step_count += 1
            if step_count >= self.nr_steps:
                done = True
                distance = -(self.env.goal_position - output[0][0])
                env_reward += distance * self.distance_penalty

        time.sleep(0.25)
        if self.counter_array[0] == 0:

            if step_count == 100:
                step_count = np.NAN
            self.counter_array[0] = step_count

        else:
            if step_count == 100:
                step_count = np.NAN
            self.counter_array = np.vstack((self.counter_array, step_count))

        if self.env.render_mode == 'human':
            time.sleep(0.25)
        self.env.reset()
        return env_reward
        return env_reward, step_count

    def next_observation(self):
        if self.acq == 'ei':
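An editorial note on the new step-count bookkeeping, since it is easy to misread in hunk form: a rollout that hits the hard-coded 100-step cap (which coincides with the new `nr_steps = 100` in `main()`) is recorded in `counter_array` as `np.NAN`, i.e. flagged as "did not reach the goal" rather than counted. NaN-aware NumPy reductions then skip those entries, for example:

```python
import numpy as np

counter_array = np.zeros((1, 1))
counter_array[0] = np.nan                       # first rollout hit the step cap
counter_array = np.vstack((counter_array, 42))  # second rollout reached the goal after 42 steps
print(counter_array.ravel())                    # [nan 42.]
print(np.nanmin(counter_array))                 # 42.0 -- NaN entries are ignored
```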
@@ -83,19 +100,44 @@ class BayesianOptimization:
                                         self.X,
                                         self.nr_test,
                                         self.nr_policy_weights,
                                         kappa=0,
                                         seed=self.policy_seed,
                                         lower=self.lowerb,
                                         upper=self.upperb)

            return x_next

        elif self.acq == 'pi':
            x_next = ProbabilityOfImprovement(self.gp,
                                              self.X,
                                              self.nr_test,
                                              self.nr_policy_weights,
                                              kappa=0,
                                              seed=self.policy_seed,
                                              lower=self.lowerb,
                                              upper=self.upperb)

            return x_next

        elif self.acq == 'cb':
            x_next = ConfidenceBound(self.gp,
                                     self.X,
                                     self.nr_test,
                                     self.nr_policy_weights,
                                     lam=2.576,
                                     seed=self.policy_seed,
                                     lower=self.lowerb,
                                     upper=self.upperb)

            return x_next

        else:
            raise NotImplementedError

    def eval_new_observation(self, x_next):
        self.policy_model.weights = x_next
        policy = self.policy_model.policy_rollout()
        reward = self.runner(policy)
        reward, step_count = self.runner(policy)

        self.X = np.vstack((self.X, x_next))
        self.Y = np.vstack((self.Y, reward))
@@ -108,28 +150,58 @@ class BayesianOptimization:
            self.best_reward = np.vstack((self.best_reward, max(self.Y)))

        self.episode += 1
        self.policy_model.plot_policy()
        return step_count

    def plot_reward(self):
        epsiodes = np.linspace(0, self.episode, self.episode)
        plt.plot(epsiodes, self.best_reward)

        plt.show()

    def plot_objective_function(self):
        plt.scatter(self.X[:, 0], self.Y)

        x_dom = np.linspace(self.lowerb * np.ones((1, 6)), self.upperb * np.ones((1, 6)), 100).reshape(-1, self.nr_policy_weights)
        y_plot, y_sigma = self.gp.predict(x_dom, return_std=True)

        y_plot = y_plot.reshape(-1)
        y_sigma = y_sigma.reshape(-1)
        print(y_plot.shape, x_dom.shape)

        plt.plot(x_dom[:, 0], y_plot)
        plt.fill_between(
            x_dom[:, 0],
            y_plot - 1.96 * y_sigma,
            y_plot + 1.96 * y_sigma,
            alpha=0.5
        )
        plt.show()

    def get_best_result(self):
        y_hat = self.gp.predict(self.X)
        idx = np.argmax(y_hat)
        x_max = self.X[idx, :]
        self.policy_model.weights = x_max
        self.policy_model.policy_rollout()
        print(self.counter_array[idx], idx)
        self.policy_model.plot_policy(finished=self.counter_array[idx])

def main():
    nr_steps = 80
    env = gym.envs.make('MountainCarContinuous-v0', render_mode="human")
    bo = BayesianOptimization(env, nr_steps)
    nr_steps = 100
    env = Continuous_MountainCarEnv()  # render_mode='human'
    bo = BayesianOptimization(env, nr_steps, acq='ei')
    bo.initialize()
    iteration_steps = 100
    iteration_steps = 200
    for i in range(iteration_steps):
        x_next = bo.next_observation()
        bo.eval_new_observation(x_next)
        step_count = bo.eval_new_observation(x_next)

        print(bo.episode, bo.best_reward[-1][0])
        print(bo.episode, bo.best_reward[-1][0], step_count)

    bo.plot_reward()
    bo.policy_model.plot_policy()
    bo.get_best_result()
    plt.show()


if __name__ == "__main__":
    main()
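Condensing the hunks above, the cycle that `main()` drives is: fit the GP on all (weights, reward) pairs, let the acquisition propose the next weight vector, roll it out, append the result, repeat. A self-contained sketch of that loop under stated assumptions: a quadratic toy objective replaces the MountainCar rollout, and only the `ConfidenceBound` helper from this diff is reused; nothing below is part of the commit.

```python
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern

from AcquistionFunctions.ConfidenceBound import ConfidenceBound


def rollout(w):
    # Stand-in for policy_rollout() + runner(): reward of a weight vector.
    return -np.sum((w - 0.3) ** 2)


rng = np.random.default_rng(0)
nr_weights = 6
X = rng.uniform(-1.0, 1.0, (3, nr_weights))      # nr_init = 3 random policies
Y = np.array([rollout(x) for x in X])

gp = GaussianProcessRegressor(kernel=Matern(nu=1.5), normalize_y=True)

for episode in range(20):
    gp.fit(X, Y)                                  # refit on everything seen so far
    x_next = ConfidenceBound(gp, X, nr_test=100, nr_weights=nr_weights,
                             lam=2.576, seed=episode)
    X = np.vstack((X, x_next))
    Y = np.append(Y, rollout(x_next))

print("best weights:", X[np.argmax(Y)], "best reward:", Y.max())
```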
@@ -35,14 +35,14 @@ class GaussianPolicy:

        return self.trajectory

    def plot_policy(self):
    def plot_policy(self, finished=np.NAN):
        x = np.linspace(0, self.nr_steps, self.nr_steps)
        plt.plot(x, self.trajectory)
        for i in self.mean:
            gaussian = np.exp(-0.5 * (x - i)**2 / self.std**2)
            plt.plot(x, gaussian)

        plt.show()
        if finished != np.NAN:
            plt.vlines(finished, -1, 1, colors='red')
        # for i in self.mean:
        #     gaussian = np.exp(-0.5 * (x - i)**2 / self.std**2)
        #     plt.plot(x, gaussian)


def main():
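One caveat on the new `finished` guard in `plot_policy` (an editorial note, not part of the diff): `finished != np.NAN` is always `True`, because NaN compares unequal to everything including itself, so the intended "no step count recorded" skip never triggers. The usual check is `np.isnan`:

```python
import numpy as np

finished = np.nan                 # the source spells this np.NAN; same value in NumPy 1.x
print(finished != np.nan)         # True  -- NaN is unequal to everything, itself included
print(not np.isnan(finished))     # False -- the check that actually skips the marker line
```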
301 ToyTask/MountainCarGym.py Normal file
@@ -0,0 +1,301 @@
"""
@author: Olivier Sigaud

A merge between two sources:

* Adaptation of the MountainCar Environment from the "FAReinforcement" library
of Jose Antonio Martin H. (version 1.0), adapted by 'Tom Schaul, tom@idsia.ch'
and then modified by Arnaud de Broissia

* the gym MountainCar environment
itself from
http://incompleteideas.net/sutton/MountainCar/MountainCar1.cp
permalink: https://perma.cc/6Z2N-PFWC
"""

import math
from typing import Optional

import numpy as np

import gym
from gym import spaces
from gym.envs.classic_control import utils
from gym.error import DependencyNotInstalled


class Continuous_MountainCarEnv(gym.Env):
    """
    ### Description

    The Mountain Car MDP is a deterministic MDP that consists of a car placed stochastically
    at the bottom of a sinusoidal valley, with the only possible actions being the accelerations
    that can be applied to the car in either direction. The goal of the MDP is to strategically
    accelerate the car to reach the goal state on top of the right hill. There are two versions
    of the mountain car domain in gym: one with discrete actions and one with continuous.
    This version is the one with continuous actions.

    This MDP first appeared in [Andrew Moore's PhD Thesis (1990)](https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-209.pdf)

    ```
    @TECHREPORT{Moore90efficientmemory-based,
        author = {Andrew William Moore},
        title = {Efficient Memory-based Learning for Robot Control},
        institution = {University of Cambridge},
        year = {1990}
    }
    ```

    ### Observation Space

    The observation is a `ndarray` with shape `(2,)` where the elements correspond to the following:

    | Num | Observation                          | Min  | Max | Unit         |
    |-----|--------------------------------------|------|-----|--------------|
    | 0   | position of the car along the x-axis | -Inf | Inf | position (m) |
    | 1   | velocity of the car                  | -Inf | Inf | position (m) |

    ### Action Space

    The action is a `ndarray` with shape `(1,)`, representing the directional force applied on the car.
    The action is clipped in the range `[-1,1]` and multiplied by a power of 0.0015.

    ### Transition Dynamics:

    Given an action, the mountain car follows the following transition dynamics:

    *velocity<sub>t+1</sub> = velocity<sub>t</sub> + force * self.power - 0.0025 * cos(3 * position<sub>t</sub>)*

    *position<sub>t+1</sub> = position<sub>t</sub> + velocity<sub>t+1</sub>*

    where force is the action clipped to the range `[-1,1]` and power is a constant 0.0015.
    The collisions at either end are inelastic with the velocity set to 0 upon collision with the wall.
    The position is clipped to the range [-1.2, 0.6] and velocity is clipped to the range [-0.07, 0.07].

    ### Reward

    A negative reward of *-0.1 * action<sup>2</sup>* is received at each timestep to penalise for
    taking actions of large magnitude. If the mountain car reaches the goal then a positive reward of +100
    is added to the negative reward for that timestep.

    ### Starting State

    The position of the car is assigned a uniform random value in `[-0.6 , -0.4]`.
    The starting velocity of the car is always assigned to 0.

    ### Episode End

    The episode ends if either of the following happens:
    1. Termination: The position of the car is greater than or equal to 0.45 (the goal position on top of the right hill)
    2. Truncation: The length of the episode is 999.

    ### Arguments

    ```
    gym.make('MountainCarContinuous-v0')
    ```

    ### Version History

    * v0: Initial versions release (1.0.0)
    """

    metadata = {
        "render_modes": ["human", "rgb_array"],
        "render_fps": 30,
    }

    def __init__(self, render_mode: Optional[str] = None, goal_velocity=0):
        self.min_action = -1.0
        self.max_action = 1.0
        self.min_position = -1.2
        self.max_position = 0.6
        self.max_speed = 0.07
        self.goal_position = (
            0.45  # was 0.5 in gym, 0.45 in Arnaud de Broissia's version
        )
        self.goal_velocity = goal_velocity
        self.power = 0.0015

        self.low_state = np.array(
            [self.min_position, -self.max_speed], dtype=np.float32
        )
        self.high_state = np.array(
            [self.max_position, self.max_speed], dtype=np.float32
        )

        self.render_mode = render_mode

        self.screen_width = 600
        self.screen_height = 400
        self.screen = None
        self.clock = None
        self.isopen = True

        self.action_space = spaces.Box(
            low=self.min_action, high=self.max_action, shape=(1,), dtype=np.float32
        )
        self.observation_space = spaces.Box(
            low=self.low_state, high=self.high_state, dtype=np.float32
        )

    def step(self, action: np.ndarray):

        position = self.state[0]
        velocity = self.state[1]
        force = min(max(action[0], self.min_action), self.max_action)

        velocity += force * self.power - 0.0025 * math.cos(3 * position)
        if velocity > self.max_speed:
            velocity = self.max_speed
        if velocity < -self.max_speed:
            velocity = -self.max_speed
        position += velocity
        if position > self.max_position:
            position = self.max_position
        if position < self.min_position:
            position = self.min_position
        if position == self.min_position and velocity < 0:
            velocity = 0

        # Convert a possible numpy bool to a Python bool.
        terminated = bool(
            position >= self.goal_position and velocity >= self.goal_velocity
        )

        reward = 0
        if terminated:
            reward += 10
        reward -= math.pow(action[0], 2) * 0.1
        reward -= 1

        self.state = np.array([position, velocity], dtype=np.float32)

        if self.render_mode == "human":
            self.render()
        return self.state, reward, terminated, False, {}

    def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
        super().reset(seed=seed)
        # Note that if you use custom reset bounds, it may lead to out-of-bound
        # state/observations.
        low, high = utils.maybe_parse_reset_bounds(options, -0.6, -0.4)
        self.state = np.array([self.np_random.uniform(low=low, high=high), 0])

        if self.render_mode == "human":
            self.render()
        return np.array(self.state, dtype=np.float32), {}

    def _height(self, xs):
        return np.sin(3 * xs) * 0.45 + 0.55

    def render(self):
        if self.render_mode is None:
            gym.logger.warn(
                "You are calling render method without specifying any render mode. "
                "You can specify the render_mode at initialization, "
                f'e.g. gym("{self.spec.id}", render_mode="rgb_array")'
            )
            return

        try:
            import pygame
            from pygame import gfxdraw
        except ImportError:
            raise DependencyNotInstalled(
                "pygame is not installed, run `pip install gym[classic_control]`"
            )

        if self.screen is None:
            pygame.init()
            if self.render_mode == "human":
                pygame.display.init()
                self.screen = pygame.display.set_mode(
                    (self.screen_width, self.screen_height)
                )
            else:  # mode == "rgb_array":
                self.screen = pygame.Surface((self.screen_width, self.screen_height))
        if self.clock is None:
            self.clock = pygame.time.Clock()

        world_width = self.max_position - self.min_position
        scale = self.screen_width / world_width
        carwidth = 40
        carheight = 20

        self.surf = pygame.Surface((self.screen_width, self.screen_height))
        self.surf.fill((255, 255, 255))

        pos = self.state[0]

        xs = np.linspace(self.min_position, self.max_position, 100)
        ys = self._height(xs)
        xys = list(zip((xs - self.min_position) * scale, ys * scale))

        pygame.draw.aalines(self.surf, points=xys, closed=False, color=(0, 0, 0))

        clearance = 10

        l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
        coords = []
        for c in [(l, b), (l, t), (r, t), (r, b)]:
            c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos))
            coords.append(
                (
                    c[0] + (pos - self.min_position) * scale,
                    c[1] + clearance + self._height(pos) * scale,
                )
            )

        gfxdraw.aapolygon(self.surf, coords, (0, 0, 0))
        gfxdraw.filled_polygon(self.surf, coords, (0, 0, 0))

        for c in [(carwidth / 4, 0), (-carwidth / 4, 0)]:
            c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos))
            wheel = (
                int(c[0] + (pos - self.min_position) * scale),
                int(c[1] + clearance + self._height(pos) * scale),
            )

            gfxdraw.aacircle(
                self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128)
            )
            gfxdraw.filled_circle(
                self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128)
            )

        flagx = int((self.goal_position - self.min_position) * scale)
        flagy1 = int(self._height(self.goal_position) * scale)
        flagy2 = flagy1 + 50
        gfxdraw.vline(self.surf, flagx, flagy1, flagy2, (0, 0, 0))

        gfxdraw.aapolygon(
            self.surf,
            [(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)],
            (204, 204, 0),
        )
        gfxdraw.filled_polygon(
            self.surf,
            [(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)],
            (204, 204, 0),
        )

        self.surf = pygame.transform.flip(self.surf, False, True)
        self.screen.blit(self.surf, (0, 0))
        if self.render_mode == "human":
            pygame.event.pump()
            self.clock.tick(self.metadata["render_fps"])
            pygame.display.flip()

        elif self.render_mode == "rgb_array":
            return np.transpose(
                np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2)
            )

    def close(self):
        if self.screen is not None:
            import pygame

            pygame.display.quit()
            pygame.quit()
            self.isopen = False
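A small mismatch worth flagging between the docstring above and the modified `step()`: the docstring still describes the stock gym reward (+100 on reaching the goal, with only the `-0.1 * action²` control penalty), while this copy awards +10 at the goal and subtracts an additional 1 every step. A quick smoke test of the modified reward (a sketch; it assumes the repository layout from this diff and a gym version that provides `gym.envs.classic_control.utils`):

```python
import numpy as np

from ToyTask.MountainCarGym import Continuous_MountainCarEnv

env = Continuous_MountainCarEnv()          # no render_mode, so pygame is not needed
obs, info = env.reset(seed=0)
obs, reward, terminated, truncated, info = env.step(np.array([1.0], dtype=np.float32))
# On a non-terminal step: reward = -0.1 * action**2 - 1
print(reward)        # -1.1
print(terminated)    # False
```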
0 runner/BOGymRunner.py Normal file