Adding action network
parent 13765c2f9e
commit 4e1ef89924
@@ -48,6 +48,7 @@ def parse_args():
    parser.add_argument('--state_size', default=256, type=int)
    parser.add_argument('--hidden_size', default=128, type=int)
    parser.add_argument('--history_size', default=128, type=int)
    parser.add_argument('--num-units', type=int, default=200, help='num hidden units for reward/value/discount models')
    parser.add_argument('--load_encoder', default=None, type=str)
    parser.add_argument('--imagination_horizon', default=15, type=str)
    # eval
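For context, argparse exposes the new flag as args.num_units. Below is a minimal sketch of how such a value could size the reward/value/discount heads its help string mentions; the two-layer MLP shape, the ELU activations, and the use of PyTorch are assumptions, not part of this commit.

import torch.nn as nn

def build_head(state_size, num_units, out_dim=1):
    # Hypothetical MLP head whose hidden width comes from --num-units.
    return nn.Sequential(
        nn.Linear(state_size, num_units),
        nn.ELU(),
        nn.Linear(num_units, num_units),
        nn.ELU(),
        nn.Linear(num_units, out_dim),
    )

# e.g. reward_head = build_head(args.state_size, args.num_units)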
@@ -197,7 +198,7 @@ class DPI:
        if args.save_video:
            self.env.video.init(enabled=True)
            self.env_clean.video.init(enabled=True)

        for i in range(self.args.episode_length):
            action = self.env.action_space.sample()
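The loop above rolls out uniformly random actions for a seed episode. As a point of reference, a self-contained collection loop of that shape is sketched below; the environment name, buffer, and the classic 4-tuple Gym step API are assumptions, not code from this repository.

import gym

env = gym.make("CartPole-v1")          # any Gym env; name is illustrative
episode_length = 100                   # stands in for args.episode_length
transitions = []                       # stands in for the training buffer

obs = env.reset()
for i in range(episode_length):
    action = env.action_space.sample()                # uniform random exploration
    next_obs, reward, done, info = env.step(action)   # classic Gym API (pre-0.26), assumed here
    transitions.append((obs, action, reward, done))
    obs = env.reset() if done else next_obs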
@@ -258,7 +259,7 @@ class DPI:
                                             self.actions, self.history, i)

            print(past_encoder_loss, past_latent_loss)

            previous_information_loss = past_latent_loss
            previous_encoder_loss = past_encoder_loss
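The final two assignments carry the current step's encoder and latent ("information") losses into the next iteration. A stripped-down, runnable version of that bookkeeping pattern is sketched below; compute_losses is a placeholder, not the actual method in this repository.

def compute_losses(step):
    # Placeholder: returns dummy encoder/latent losses for illustration only.
    return 0.1 / (step + 1), 0.2 / (step + 1)

previous_encoder_loss, previous_information_loss = 0.0, 0.0
for i in range(3):                      # stands in for the training loop
    past_encoder_loss, past_latent_loss = compute_losses(i)
    print(past_encoder_loss, past_latent_loss)  # debug output, as in the diff

    # make this step's losses available to the next iteration
    previous_information_loss = past_latent_loss
    previous_encoder_loss = past_encoder_loss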