From a101b2f53be2d7e18277220f17ac22b28f0574b7 Mon Sep 17 00:00:00 2001
From: Niko
Date: Fri, 19 May 2023 09:33:49 +0200
Subject: [PATCH] started adding user query interactions

---
 .../ReinforcementLearning/CartPole.py         | 309 ++++++++++++++++++
 .../ReinforcementLearning/Pendulum.py         | 273 ++++++++++++++++
 src/active_bo_ros/resource/clockwise.png      | Bin 0 -> 6992 bytes
 3 files changed, 582 insertions(+)
 create mode 100644 src/active_bo_ros/resource/clockwise.png

diff --git a/src/active_bo_ros/active_bo_ros/ReinforcementLearning/CartPole.py b/src/active_bo_ros/active_bo_ros/ReinforcementLearning/CartPole.py
index e69de29..9f472dc 100644
--- a/src/active_bo_ros/active_bo_ros/ReinforcementLearning/CartPole.py
+++ b/src/active_bo_ros/active_bo_ros/ReinforcementLearning/CartPole.py
@@ -0,0 +1,309 @@
+"""
+Classic cart-pole system implemented by Rich Sutton et al.
+Copied from http://incompleteideas.net/sutton/book/code/pole.c
+permalink: https://perma.cc/C9ZM-652R
+"""
+import math
+from typing import Optional, Union
+
+import numpy as np
+
+import gym
+from gym import logger, spaces
+from gym.spaces import Box
+from gym.envs.classic_control import utils
+from gym.error import DependencyNotInstalled
+
+
+class CartPoleEnv(gym.Env[np.ndarray, Union[int, np.ndarray]]):
+    """
+    ### Description
+
+    This environment corresponds to the version of the cart-pole problem described by Barto, Sutton, and Anderson in
+    ["Neuronlike Adaptive Elements That Can Solve Difficult Learning Control Problems"](https://ieeexplore.ieee.org/document/6313077).
+    A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track.
+    The pendulum is placed upright on the cart, and the goal is to balance the pole by applying forces
+    in the left and right direction on the cart.
+
+    ### Action Space
+
+    Due to the policy shaping approach, the action is an `ndarray` with shape `(1,)` taking values in `[-1, 1]`.
+    The action is scaled by `force_mag`: values below 0 push the cart to the left, values above 0 push it
+    to the right, and an action of exactly 0 applies no force (see the usage sketch further below).
+
+    ### Observation Space
+
+    The observation is an `ndarray` with shape `(4,)` with the values corresponding to the following positions and velocities:
+
+    | Num | Observation           | Min                 | Max               |
+    |-----|-----------------------|---------------------|-------------------|
+    | 0   | Cart Position         | -4.8                | 4.8               |
+    | 1   | Cart Velocity         | -Inf                | Inf               |
+    | 2   | Pole Angle            | ~ -0.418 rad (-24°) | ~ 0.418 rad (24°) |
+    | 3   | Pole Angular Velocity | -Inf                | Inf               |
+
+    **Note:** While the ranges above denote the possible values for the observation space of each element,
+    they are not reflective of the allowed values of the state space in an unterminated episode. In particular:
+    - The cart x-position (index 0) can take values in `(-4.8, 4.8)`, but the episode terminates
+      if the cart leaves the `(-2.4, 2.4)` range.
+    - The pole angle can be observed between `(-.418, .418)` radians (or **±24°**), but the episode terminates
+      if the pole angle is not in the range `(-.2095, .2095)` (or **±12°**).
+
+    ### Rewards
+
+    Since the goal is to keep the pole upright for as long as possible, a reward of `+1` for every step taken,
+    including the termination step, is allotted. The threshold for rewards is 475 for v1.
+
+    ### Starting State
+
+    All observations are assigned a uniformly random value in `(-0.05, 0.05)`.
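+
+    As a minimal usage sketch of the modified continuous action interface (illustrative only;
+    the action value shown is arbitrary):
+
+    ```
+    env = CartPoleEnv()
+    observation, info = env.reset(seed=0)
+    action = np.array([0.5], dtype=np.float32)  # push right at half of force_mag
+    observation, reward, terminated, truncated, info = env.step(action)
+    ```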
+
+    ### Episode End
+
+    The episode ends if any one of the following occurs:
+
+    1. Termination: Pole Angle is greater than ±12°
+    2. Termination: Cart Position is greater than ±2.4 (center of the cart reaches the edge of the display)
+    3. Truncation: Episode length is greater than 500 (200 for v0)
+
+    ### Arguments
+
+    ```
+    gym.make('CartPole-v1')
+    ```
+
+    No additional arguments are currently supported.
+    """
+
+    metadata = {
+        "render_modes": ["human", "rgb_array"],
+        "render_fps": 50,
+    }
+
+    def __init__(self, render_mode: Optional[str] = None):
+        self.gravity = 9.8
+        self.masscart = 1.0
+        self.masspole = 0.1
+        self.total_mass = self.masspole + self.masscart
+        self.length = 0.5  # actually half the pole's length
+        self.polemass_length = self.masspole * self.length
+        self.force_mag = 10.0
+        self.tau = 0.02  # seconds between state updates
+        self.kinematics_integrator = "euler"
+
+        # Angle at which to fail the episode
+        self.theta_threshold_radians = 12 * 2 * math.pi / 360
+        self.x_threshold = 2.4
+
+        # Angle limit set to 2 * theta_threshold_radians so failing observation
+        # is still within bounds.
+        high = np.array(
+            [
+                self.x_threshold * 2,
+                np.finfo(np.float32).max,
+                self.theta_threshold_radians * 2,
+                np.finfo(np.float32).max,
+            ],
+            dtype=np.float32,
+        )
+
+        # continuous action space in [-1, 1] for the policy shaping approach
+        self.action_space = Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)
+        self.observation_space = spaces.Box(-high, high, dtype=np.float32)
+
+        self.render_mode = render_mode
+
+        self.screen_width = 600
+        self.screen_height = 400
+        self.screen = None
+        self.clock = None
+        self.isopen = True
+        self.state = None
+
+        self.steps_beyond_terminated = None
+
+    def step(self, action):
+        err_msg = f"{action!r} ({type(action)}) invalid"
+        assert self.action_space.contains(action), err_msg
+        assert self.state is not None, "Call reset before using step method."
+        x, x_dot, theta, theta_dot = self.state
+        # changed usage of action due to policy shaping approach:
+        # extract the scalar so all state components stay scalars below
+        force = float(action[0]) * self.force_mag
+        costheta = math.cos(theta)
+        sintheta = math.sin(theta)
+
+        # For the interested reader:
+        # https://coneural.org/florian/papers/05_cart_pole.pdf
+        temp = (
+            force + self.polemass_length * theta_dot**2 * sintheta
+        ) / self.total_mass
+        thetaacc = (self.gravity * sintheta - costheta * temp) / (
+            self.length * (4.0 / 3.0 - self.masspole * costheta**2 / self.total_mass)
+        )
+        xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
+
+        if self.kinematics_integrator == "euler":
+            x = x + self.tau * x_dot
+            x_dot = x_dot + self.tau * xacc
+            theta = theta + self.tau * theta_dot
+            theta_dot = theta_dot + self.tau * thetaacc
+        else:  # semi-implicit euler
+            x_dot = x_dot + self.tau * xacc
+            x = x + self.tau * x_dot
+            theta_dot = theta_dot + self.tau * thetaacc
+            theta = theta + self.tau * theta_dot
+
+        self.state = (x, x_dot, theta, theta_dot)
+
+        terminated = bool(
+            x < -self.x_threshold
+            or x > self.x_threshold
+            or theta < -self.theta_threshold_radians
+            or theta > self.theta_threshold_radians
+        )
+
+        if not terminated:
+            reward = 1.0
+        elif self.steps_beyond_terminated is None:
+            # Pole just fell!
+            self.steps_beyond_terminated = 0
+            reward = 1.0
+        else:
+            if self.steps_beyond_terminated == 0:
+                logger.warn(
+                    "You are calling 'step()' even though this "
+                    "environment has already returned terminated = True. You "
+                    "should always call 'reset()' once you receive 'terminated = "
+                    "True' -- any further steps are undefined behavior."
+                )
+            self.steps_beyond_terminated += 1
+            reward = 0.0
+
+        if self.render_mode == "human":
+            self.render()
+
+        return np.array(self.state, dtype=np.float32), reward, terminated, False, {}
+
+    def reset(
+        self,
+        *,
+        seed: Optional[int] = None,
+        options: Optional[dict] = None,
+    ):
+        super().reset(seed=seed)
+        # Note that if you use custom reset bounds, it may lead to out-of-bound
+        # state/observations.
+        low, high = utils.maybe_parse_reset_bounds(
+            options, -0.05, 0.05  # default low and high
+        )
+        self.state = self.np_random.uniform(low=low, high=high, size=(4,))
+        self.steps_beyond_terminated = None
+
+        if self.render_mode == "human":
+            self.render()
+        return np.array(self.state, dtype=np.float32), {}
+
+    def render(self):
+        if self.render_mode is None:
+            gym.logger.warn(
+                "You are calling render method without specifying any render mode. "
+                "You can specify the render_mode at initialization, "
+                f'e.g. gym.make("{self.spec.id}", render_mode="rgb_array")'
+            )
+            return
+
+        try:
+            import pygame
+            from pygame import gfxdraw
+        except ImportError:
+            raise DependencyNotInstalled(
+                "pygame is not installed, run `pip install gym[classic_control]`"
+            )
+
+        if self.screen is None:
+            pygame.init()
+            if self.render_mode == "human":
+                pygame.display.init()
+                self.screen = pygame.display.set_mode(
+                    (self.screen_width, self.screen_height)
+                )
+            else:  # mode == "rgb_array"
+                self.screen = pygame.Surface((self.screen_width, self.screen_height))
+        if self.clock is None:
+            self.clock = pygame.time.Clock()
+
+        world_width = self.x_threshold * 2
+        scale = self.screen_width / world_width
+        polewidth = 10.0
+        polelen = scale * (2 * self.length)
+        cartwidth = 50.0
+        cartheight = 30.0
+
+        if self.state is None:
+            return None
+
+        x = self.state
+
+        self.surf = pygame.Surface((self.screen_width, self.screen_height))
+        self.surf.fill((255, 255, 255))
+
+        l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
+        axleoffset = cartheight / 4.0
+        cartx = x[0] * scale + self.screen_width / 2.0  # MIDDLE OF CART
+        carty = 100  # TOP OF CART
+        cart_coords = [(l, b), (l, t), (r, t), (r, b)]
+        cart_coords = [(c[0] + cartx, c[1] + carty) for c in cart_coords]
+        gfxdraw.aapolygon(self.surf, cart_coords, (0, 0, 0))
+        gfxdraw.filled_polygon(self.surf, cart_coords, (0, 0, 0))
+
+        l, r, t, b = (
+            -polewidth / 2,
+            polewidth / 2,
+            polelen - polewidth / 2,
+            -polewidth / 2,
+        )
+
+        pole_coords = []
+        for coord in [(l, b), (l, t), (r, t), (r, b)]:
+            coord = pygame.math.Vector2(coord).rotate_rad(-x[2])
+            coord = (coord[0] + cartx, coord[1] + carty + axleoffset)
+            pole_coords.append(coord)
+        gfxdraw.aapolygon(self.surf, pole_coords, (202, 152, 101))
+        gfxdraw.filled_polygon(self.surf, pole_coords, (202, 152, 101))
+
+        gfxdraw.aacircle(
+            self.surf,
+            int(cartx),
+            int(carty + axleoffset),
+            int(polewidth / 2),
+            (129, 132, 203),
+        )
+        gfxdraw.filled_circle(
+            self.surf,
+            int(cartx),
+            int(carty + axleoffset),
+            int(polewidth / 2),
+            (129, 132, 203),
+        )
+
+        gfxdraw.hline(self.surf, 0, self.screen_width, carty, (0, 0, 0))
+
+        self.surf = pygame.transform.flip(self.surf, False, True)
+        self.screen.blit(self.surf, (0, 0))
+        if self.render_mode == "human":
+            pygame.event.pump()
+            self.clock.tick(self.metadata["render_fps"])
+            pygame.display.flip()
+
+        elif self.render_mode == "rgb_array":
+            return np.transpose(
+                np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2)
+            )
+
+    def close(self):
+        if self.screen is not None:
+            import pygame
+
+            pygame.display.quit()
+            pygame.quit()
+            self.isopen = False
\ No newline at end of file
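
For reference, a short random rollout against the modified CartPole above might look as follows
(an illustrative sketch, not part of the patch; the import path assumes this package layout):

```
import numpy as np
from active_bo_ros.ReinforcementLearning.CartPole import CartPoleEnv

env = CartPoleEnv()
observation, info = env.reset(seed=42)
for _ in range(200):
    # sample a continuous action in [-1, 1]; a learned policy would go here
    action = env.action_space.sample()
    observation, reward, terminated, truncated, info = env.step(action)
    if terminated or truncated:
        observation, info = env.reset()
env.close()
```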
diff --git a/src/active_bo_ros/active_bo_ros/ReinforcementLearning/Pendulum.py b/src/active_bo_ros/active_bo_ros/ReinforcementLearning/Pendulum.py
index e69de29..758e676 100644
--- a/src/active_bo_ros/active_bo_ros/ReinforcementLearning/Pendulum.py
+++ b/src/active_bo_ros/active_bo_ros/ReinforcementLearning/Pendulum.py
@@ -0,0 +1,273 @@
+__credits__ = ["Carlos Luis"]
+
+from os import path
+from typing import Optional
+
+import numpy as np
+
+import gym
+from gym import spaces
+from gym.envs.classic_control import utils
+from gym.error import DependencyNotInstalled
+
+DEFAULT_X = np.pi
+DEFAULT_Y = 1.0
+
+
+class PendulumEnv(gym.Env):
+    """
+    ### Description
+
+    The inverted pendulum swingup problem is based on the classic problem in control theory.
+    The system consists of a pendulum attached at one end to a fixed point, with the other end being free.
+    The pendulum starts in a random position, and the goal is to apply torque on the free end to swing it
+    into an upright position, with its center of gravity right above the fixed point.
+
+    The diagram below specifies the coordinate system used for the implementation of the pendulum's
+    dynamic equations.
+
+    ![Pendulum Coordinate System](./diagrams/pendulum.png)
+
+    - `x-y`: cartesian coordinates of the pendulum's end in meters.
+    - `theta`: angle in radians.
+    - `tau`: torque in `N m`. Defined as positive _counter-clockwise_.
+
+    ### Action Space
+
+    The action is an `ndarray` with shape `(1,)` representing the torque applied to the free end of the
+    pendulum. Due to the policy shaping approach, the action is given in `[-1, 1]` and scaled internally
+    to `[-2, 2]` N m inside `step()`.
+
+    | Num | Action | Min  | Max |
+    |-----|--------|------|-----|
+    | 0   | Torque | -1.0 | 1.0 |
+
+    ### Observation Space
+
+    The observation is an `ndarray` with shape `(3,)` representing the x-y coordinates of the pendulum's free
+    end and its angular velocity.
+
+    | Num | Observation      | Min  | Max |
+    |-----|------------------|------|-----|
+    | 0   | x = cos(theta)   | -1.0 | 1.0 |
+    | 1   | y = sin(theta)   | -1.0 | 1.0 |
+    | 2   | Angular Velocity | -8.0 | 8.0 |
+
+    ### Rewards
+
+    The reward function is defined as:
+
+    *r = -(theta^2 + 0.1 * theta_dt^2 + 0.001 * torque^2)*
+
+    where `theta` is the pendulum's angle normalized between *[-pi, pi]* (with 0 being in the upright position).
+    Based on the above equation, the minimum reward that can be obtained is
+    *-(pi^2 + 0.1 * 8^2 + 0.001 * 2^2) = -16.2736044*,
+    while the maximum reward is zero (pendulum is upright with zero velocity and no torque applied).
+    A code restatement follows the Arguments section below.
+
+    ### Starting State
+
+    The starting state is a random angle in *[-pi, pi]* and a random angular velocity in *[-1, 1]*.
+
+    ### Episode Truncation
+
+    The episode truncates at 200 time steps.
+
+    ### Arguments
+
+    - `g`: acceleration of gravity measured in *(m s^-2)* used to calculate the pendulum dynamics.
+      The default value is g = 10.0.
+
+    ```
+    gym.make('Pendulum-v1', g=9.81)
+    ```
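+
+    For intuition, the per-step cost computed in `step()` below restates the reward above, using
+    the `angle_normalize` helper defined at the bottom of this module (`th` is the angle, `thdot`
+    the angular velocity, and `u` the applied torque):
+
+    ```
+    costs = angle_normalize(th) ** 2 + 0.1 * thdot**2 + 0.001 * (u**2)
+    reward = -costs
+    ```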
+
+    ### Version History
+
+    * v1: Simplify the math equations, no difference in behavior.
+    * v0: Initial versions release (1.0.0)
+    """
+
+    metadata = {
+        "render_modes": ["human", "rgb_array"],
+        "render_fps": 30,
+    }
+
+    def __init__(self, render_mode: Optional[str] = None, g=10.0):
+        self.max_speed = 8
+        self.max_torque = 2.0
+        self.dt = 0.05
+        self.g = g
+        self.m = 1.0
+        self.l = 1.0
+
+        self.render_mode = render_mode
+
+        self.screen_dim = 500
+        self.screen = None
+        self.clock = None
+        self.isopen = True
+
+        high = np.array([1.0, 1.0, self.max_speed], dtype=np.float32)
+        # This will throw a warning in tests/envs/test_envs in utils/env_checker.py as the space is not symmetric
+        # or normalised as max_torque == 2 by default. Ignoring the issue here as the default settings are too old
+        # to update to follow the openai gym api
+        self.action_space = spaces.Box(
+            low=-self.max_torque, high=self.max_torque, shape=(1,), dtype=np.float32
+        )
+        self.observation_space = spaces.Box(low=-high, high=high, dtype=np.float32)
+
+    def step(self, u):
+        th, thdot = self.state  # th := theta
+
+        g = self.g
+        m = self.m
+        l = self.l
+        dt = self.dt
+
+        u = 2 * u  # scaling the action from [-1, 1] to +/- 2 Nm (policy shaping approach)
+
+        u = np.clip(u, -self.max_torque, self.max_torque)[0]
+        self.last_u = u  # for rendering
+        costs = angle_normalize(th) ** 2 + 0.1 * thdot**2 + 0.001 * (u**2)
+
+        newthdot = thdot + (3 * g / (2 * l) * np.sin(th) + 3.0 / (m * l**2) * u) * dt
+        newthdot = np.clip(newthdot, -self.max_speed, self.max_speed)
+        newth = th + newthdot * dt
+
+        self.state = np.array([newth, newthdot])
+
+        if self.render_mode == "human":
+            self.render()
+        return self._get_obs(), -costs, False, False, {}
+
+    def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
+        super().reset(seed=seed)
+        if options is None:
+            high = np.array([DEFAULT_X, DEFAULT_Y])
+        else:
+            # Note that if you use custom reset bounds, it may lead to out-of-bound
+            # state/observations.
+            x = options.get("x_init") if "x_init" in options else DEFAULT_X
+            y = options.get("y_init") if "y_init" in options else DEFAULT_Y
+            x = utils.verify_number_and_cast(x)
+            y = utils.verify_number_and_cast(y)
+            high = np.array([x, y])
+        low = -high  # We enforce symmetric limits.
+        self.state = self.np_random.uniform(low=low, high=high)
+        self.last_u = None
+
+        if self.render_mode == "human":
+            self.render()
+        return self._get_obs(), {}
+
+    def _get_obs(self):
+        theta, thetadot = self.state
+        # theta can be recovered from the observation via np.arctan2(obs[1], obs[0])
+        return np.array([np.cos(theta), np.sin(theta), thetadot], dtype=np.float32)
+
+    def render(self):
+        if self.render_mode is None:
+            gym.logger.warn(
+                "You are calling render method without specifying any render mode. "
+                "You can specify the render_mode at initialization, "
+                f'e.g. gym.make("{self.spec.id}", render_mode="rgb_array")'
+            )
+            return
gym("{self.spec.id}", render_mode="rgb_array")' + ) + return + + try: + import pygame + from pygame import gfxdraw + except ImportError: + raise DependencyNotInstalled( + "pygame is not installed, run `pip install gym[classic_control]`" + ) + + if self.screen is None: + pygame.init() + if self.render_mode == "human": + pygame.display.init() + self.screen = pygame.display.set_mode( + (self.screen_dim, self.screen_dim) + ) + else: # mode in "rgb_array" + self.screen = pygame.Surface((self.screen_dim, self.screen_dim)) + if self.clock is None: + self.clock = pygame.time.Clock() + + self.surf = pygame.Surface((self.screen_dim, self.screen_dim)) + self.surf.fill((255, 255, 255)) + + bound = 2.2 + scale = self.screen_dim / (bound * 2) + offset = self.screen_dim // 2 + + rod_length = 1 * scale + rod_width = 0.2 * scale + l, r, t, b = 0, rod_length, rod_width / 2, -rod_width / 2 + coords = [(l, b), (l, t), (r, t), (r, b)] + transformed_coords = [] + for c in coords: + c = pygame.math.Vector2(c).rotate_rad(self.state[0] + np.pi / 2) + c = (c[0] + offset, c[1] + offset) + transformed_coords.append(c) + gfxdraw.aapolygon(self.surf, transformed_coords, (204, 77, 77)) + gfxdraw.filled_polygon(self.surf, transformed_coords, (204, 77, 77)) + + gfxdraw.aacircle(self.surf, offset, offset, int(rod_width / 2), (204, 77, 77)) + gfxdraw.filled_circle( + self.surf, offset, offset, int(rod_width / 2), (204, 77, 77) + ) + + rod_end = (rod_length, 0) + rod_end = pygame.math.Vector2(rod_end).rotate_rad(self.state[0] + np.pi / 2) + rod_end = (int(rod_end[0] + offset), int(rod_end[1] + offset)) + gfxdraw.aacircle( + self.surf, rod_end[0], rod_end[1], int(rod_width / 2), (204, 77, 77) + ) + gfxdraw.filled_circle( + self.surf, rod_end[0], rod_end[1], int(rod_width / 2), (204, 77, 77) + ) + + fname = path.join(path.dirname(__file__), "../../resource/clockwise.png") + img = pygame.image.load(fname) + if self.last_u is not None: + scale_img = pygame.transform.smoothscale( + img, + (scale * np.abs(self.last_u) / 2, scale * np.abs(self.last_u) / 2), + ) + is_flip = bool(self.last_u > 0) + scale_img = pygame.transform.flip(scale_img, is_flip, True) + self.surf.blit( + scale_img, + ( + offset - scale_img.get_rect().centerx, + offset - scale_img.get_rect().centery, + ), + ) + + # drawing axle + gfxdraw.aacircle(self.surf, offset, offset, int(0.05 * scale), (0, 0, 0)) + gfxdraw.filled_circle(self.surf, offset, offset, int(0.05 * scale), (0, 0, 0)) + + self.surf = pygame.transform.flip(self.surf, False, True) + self.screen.blit(self.surf, (0, 0)) + if self.render_mode == "human": + pygame.event.pump() + self.clock.tick(self.metadata["render_fps"]) + pygame.display.flip() + + else: # mode == "rgb_array": + return np.transpose( + np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2) + ) + + def close(self): + if self.screen is not None: + import pygame + + pygame.display.quit() + pygame.quit() + self.isopen = False + + +def angle_normalize(x): + return ((x + np.pi) % (2 * np.pi)) - np.pi \ No newline at end of file diff --git a/src/active_bo_ros/resource/clockwise.png b/src/active_bo_ros/resource/clockwise.png new file mode 100644 index 0000000000000000000000000000000000000000..1aa423652b279e6bf31cd1f06bed6a9c421f9323 GIT binary patch literal 6992 zcmXwe2T)U8*L6rJf)r_@bRh`>0hQhak(NsnBE3uRL^{%f@Th=*TsjB{M3CNl3ko7l z6zL#Dsx;|MQ2zLS^G_y|n|1eIXWyMObJp64*3;FXqhY6kKp=DgS``CN{zX)j75AymGAjvCeEC2D3yN&A8RS_p)F4^UM$2z|ZQbvpsz7eML!j;I%$c-Qa>gU0TDsmIK0~wcEVvFaDh?Sf#4KpR3t~ErM4>Z|3EP-#@~Ott6?_<$XEte7vw$*GzAOPQaw0 
zoj2;O##FSyb(Rg+3a{S0#dnOIGmu&6EW`qKo;(j{S;B6o6@WlM{UOi)N7OhN9TxKGU z`E^_ZN~Yn&!(@5%pBcPMDmW^MB=e%=#=ll=W;$6kvyOm|*$C}$V?bek)3EG1lWMqM zN(P}9y~t;-lgt3?o#5zzkJ)i!ua^t->ktHRBI|V4;QC_}R=8YXz&eqs67H8RNSG&c za~9FBp!{35GKnab3MNy+9~vz?q0LRv7~?0folE72{@jIhO~BG!gP2v9dNU2GG0N2h zA6q*l6iDRWoPZTgaM#1foTYX`&D;!gVh1XLb=y$F46dGZ_kv@xgP_4gH`*y=JM^1+d5d6A%h_#6csPSZEN zUSV1`BO3niD?6_Tx*>WY+8g%yG`k??-E2zrdD#%I8Oz9%u8&CHFIMyNatA~qS!wa z&pHr`?Dr!Kh;<7C^&7Cy1%de?G%^Y*ZJguHHRfd@n@-wYP0UrEqQLw%w#geB zRiHtmBGHHCL9&v-VY(J(g5yoI)%G^+T_dn%j)mAtx^a=oZvyl`V#iYSFfSKhw@tyL z93j#VQGg^@2%*q3@R>x=qP;4sJHtoChFmoY%d0#PxPE1Ko zD;Z4gvnQ%A0DF|1Zi%CI^m&uK`{Mgig1~7A7zl<$u{$oHG4dJIDE^$AO0NX8f&NwB z1;AU<KZZAQe!w_bQ3-9mZaEvFcLux2Xjq(g=S7gTPr z@c=If9Ka?xT5 zKEPF6p3zH3k#2KV8_@tq_>~mlxNvF_HA%R7LW1!BG2o4C@siD)>1K7%e;iZHc-0{6 zn@tIuJ1*n#q*A{}CuFsy)JaCc50hh~hu;SU6<$LKW~p^2;Z-8whj_PWM4$4=%j_%i zo^Zcd0o+6A@^AEpj+Lp41UTx5Ong$wU*2~H?6(NmYhf^rld97*E2I*{aT~bS|Njur zVWHv=UxLYF0nx*&IT?cLK>rH)!iR{t^naI*OjS`P;JN<4bxRo-dBkPq{{;^zcHe3q z?q?~0JNUt>;XyhT5QJb+0CVS3PRjq%L|t$}aQz&s0PMH*3O(@S&O=*Z@63-<@_JLp zXBvbH=Qf{WRmc-d<#u`#Ad?#o$Zi2`X=jpt6n<-Z|K%2!-Up^%&R@|2txNdRJ#fL# zh1#Nt7*)_9?lylqP6#UMCg9X$(T&a zbOgU(WBZHwG=+CD5PAxuxQ9Q?h)Y63DR}5JjX7=Kw$P=>e%Bv=Bwg&XtKF$z>tpVM zqh+N}m+5&F?_w`F5~)sM3PzA12D5b`UG+O{zrISCVZZJ^D0W7HP9#a z5WonBF`^o=3w(EsnL0muK^P$NX@<>k)&ou3=JhTyp{&aaEM?fG-AV@oQ9vhMOTl<+ z($yjNrd|fAqUjzQQqL1q-W4E`#w;9>C%sNSxQecFh6Oi4! zhtlwX7D_(sI*w%3_AToofk9>P)S?{YI)t%3`+{mA)JtTo8D9KCK5}`5MF%V*Yh?== zT-k(e8Z7JsnC|ag-afI&zNDMSiVK`0Qgxs~k#muD9$c0qP?xz&dr9q{HN~(nuo%Q^ zQt@T^$kd=#rpE`zEEH%3us^6nnBD|Bsxr_}QfCbsowdYfZ(CeaP?uE+|Hx4{wHLzy zvK*jno7eJwGXHhjp*ELQoI4mu!7uP7*^aA;Hl9VmglW9t@>tT5_4QtcU?MyST4fyD zrK&(B)A8&HoZ2P`uLlQm=-sExP&fRE=#p>!=)7of$%s}&!89qf8m57{s|k=ED!7hY zYtrI%1b;DiVlc>z>{Fsi7R(~GZGFcc|7C@Vh%z(DXr%Z@@u3BsH-zX509V{5JlJGRFzZ2>f|+4z}?;DXkI$|0LplL*+)&# z3#r5SGLZb_an+_JKJTvdE2D`QCJ=WXlrZp?BO6}CnENb{Z6}S87P(A?%Zm6AQ3*?+ z;;t3`SGD@PJ2G;>Uy7YgYVKObrc4RdpJEGXGaqs&|6U7}#(KRu*Qz3Q~+f5nX`w?l4CTZ|EDSnKz{r%bKq3vgZw|3}>I_F^< zyTvy5zV)7bze`9Yq~TZ>6+DHBTU~N+L7`EQbB+IHd&nv8j@C!(z~l7C+|s{KzLFp2 z($30$wqvuSipP70rkecv9X=7itHeVkYP%o%i<_zJXIjLY0Llvc&31v$)CyK$etvKO znajJOe*FtO>gs{n_DI(6+_Y^xGHii?ql=Zjr0u(W%1r?-v1$C-Q(wpmAEGxhu;-P< z`ysrzc25K%(pW?20rp`N<6%2wUl_44@4^IT0d=>0lfTv#bANy-PK>!Jc~o}vT8s|Z zf7^=Z6A!#EEeTvyA)IBExqDRAyl)3oJ|pW@*eM zD|LFk5rprAQxS#daOL&+Yw&b95B~6nWalL#e{AfbHb33{MpC1N{rOUBjdb}$u&u>C zG-+2am9e_v#LY<6Wpbd7ot4T3(AUBn2Ayj)-)VtntnG)#Hl{N@>gJ%|uOA}7Vl=Xk zs$UaKARowpBkCikla>~2n!jt6JOItA-Ks2HfV~bIS<(?Tt?Vm3nJ-S!D;Z}J@FenN z{0d3a_C_z4!urb&@@(ForI;4OStViv3p}5grp;XGT(Z)$%?1rd+iaStA_NAs!2qg( zE%Sen!9m#4_o~xB?lCQrCZa7I1#rU~(|WDU{W@SH#lRGpoc!%?mv=z2R}Fbsm1e*i1&@1Vy^t*>&!F^fb6_j(Wm9S{K_4+80%S3_k9H)~U8T?q1IbT< zs%9?>zcU9v`j}MBZZB5}flG#6-p`@tMtYs_xaFTV&CV*7E;C6Vwbd)m5-Q}b0eo9A zSK4`kzI>uG`U+Yohg4tQ*3v9dsVg|SqfU2G%m>^*2hS#B*3l042m)F!+{{=He1h=>wP2IM2a%;Mvth|6ENZu!5r%6g zr0!p{qw6aH;e@wc-nS}c=z;bvyiGHY0ejCidgNU9-L_vuk`|omeh$yk3&8#%@OKdv zNvAjX&hMO_R8Pukc=U1(&k7B85+KP4>sKVj@ZI6+ zF13i)Aj{%K7EKxQ>gMUcbjvwi)M{CQPNBL+aI^6b_9W2kSlCu}HMkkdx#hf&Hd{Hs zoR1f+R@x2O7s#+Zhy5Y6cR!8SbxOuphkzy(K`*sopL+L|Z6g>!BgRyGej(Gm)KP7Q zE$4*7g3nDQkhF647VSdXG*#R_r>Uhf>coMNtbIpw-Jjg3kV_0j^{8lv8R?aPXa&gg)IavMF!n>w^G8gH6Rg^PUZ{p zJY{75pO_up^5rw8GLt2yHKJD&_QiT8Yl`t_bx?mgOmm8qB*}oU1@MvZC&Vtt<@8|BsmLYBebN4=BIq3bM69@6eg4Y)h z!c~%)F=$XE(k!fAnAupRqQ!$@J(<(1OVkfBeRG=CJ{LTDQ5n`eSj4WhD?ipg{vIO5 z@c|K7SpAcIaALTmuppl&i3KT%lj!JCEIMy#)3sug!ji~?Wt}u}$ zMtvgLGuM_xvSr1l&mX$~oR1{b)~HSwUd3$Eh0w10Yh~La*NjXPpZzmf9zHy8CbLe2 z<=k^Ei#05@t9{O5gWkJ`C*iF1HiyoANtQ~DbxN{c*$neoJrynA9TG#w;0!6Uk2V-l 
zy-K$6B6onJT3UW-h~x!Je}Y3eyl4%7&(\+8odP_4r{w5p|#McwnYwrp{>%xdfe zNxZoy&2%cs7bQxcQsEgiFji_;zk0k>4M1iJJx$y8O|^#3e{n{bS- z(|WD8D^2<-_)>g1dyKAoed#$H8-1HlH=&a#9m;j|3hzbtviL(i_Z-zjOZ1Ov zKP^S`Wa8I{@&X?mG1(MxcKMeEv0Mn(M;-hCNB@%uxe?kV7vqL)W@9u9q&KkZk62BQ%+obIJ-;vV`<8?|HAslnw7JNls zzt-7tQWprAvFa{rpVX2O)TtzLnKe zxBBm%sOhgxJs)Sk6aWn#=dQ1rdRk|1R%@M3hc7m+&?(l%vJaJe+zzo&TVK*=_-&ra z^ZN^=0J;%E-5l3c=24zNH@8LpqD@>myfT_|_n>Gq*I@q4{S zd4u1Tp!g@;O|#UpDcK4Bgp{VFJfR&F zvD5j_ba=IB^^zdH?&Fq6;0YSWEz_CAfOIl~(4me89o;-)Ls8UvFM- zPrG7jQe3M=Tw08-ThCFt4D~mHQcu(3VHE{!+Y=51?)na>%cp3GPW9!$zCBCAIBCRK z|6dp}0k6Ax$!A*7z;w>RL=bnutX5B#QP@x(1V6 zJd~J(k53ls zJXo-jV>xDS+A47AXb&GQpYuz>J^j8b*&n;gc%^4rm{zQ!)_{~E@NB(msPy& zW2#|>c6fAnOMhJ<7@Avne#e)YD+@P+-ng6t$SDV}?X~d)ite!Czg}v5xP3Oy8|QO= zrD4y_;g-|t^fTZ09dRKl3gfRFy=E`Xius;DlQ^nP@=(@%ahCMt1q0{rfC`5VPXwK4 z=c~H7r>^$kW=JTWTQ~dVOoEX3#e}+43hH^MCxln9dMzrJFfxBOZ zZ92AB{ZaDn)0Nmb1%o z3w)1^%LL4{?b)mDuI2nVBIx3SOXMM~Nlc5POAn&#nwd|aYdMQGgVmRk?4{*jCZir8 z%RQ$0D1l>}8zMi+T-fKU`h1UQekDNXdAGF06*jdnI!IVA)S55*rC)$l;LFgm8#>oR zSVi%u`Zz)SSt^^}yo_RkQ@kNz0sDQw4rjfiFtemLg80ohV#Lxeu}F!zb;gA!JD4C-Lg;944qi_# zaR3c!8~Ey_a;Y{oroC;H}mm7+Iq-7p= zXs