repo (stringclasses, 679 values) | path (stringlengths, 6-122) | func_name (stringlengths, 2-76) | original_string (stringlengths, 87-70.9k) | language (stringclasses, 1 value) | code (stringlengths, 87-70.9k) | code_tokens (sequencelengths, 20-6.91k) | docstring (stringlengths, 1-21.7k) | docstring_tokens (sequencelengths, 1-1.6k) | sha (stringclasses, 679 values) | url (stringlengths, 92-213) | partition (stringclasses, 1 value)
---|---|---|---|---|---|---|---|---|---|---|---|
openai/baselines | baselines/deepq/deepq.py | learn | def learn(env,
network,
seed=None,
lr=5e-4,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
checkpoint_path=None,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
load_path=None,
**network_kwargs
):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
network: string or a function
neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
(mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
seed: int or None
PRNG seed. Runs with the same seed "should" give the same results. If None, no seeding is used.
lr: float
learning rate for adam optimizer
total_timesteps: int
number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of training, set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None, defaults to total_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
callback: (locals, globals) -> bool
function called at every step with the state of the algorithm.
If the callback returns True, training stops.
load_path: str
path to load the model from. (default: None)
**network_kwargs
additional keyword arguments to pass to the network builder.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = get_session()
set_global_seeds(seed)
q_func = build_q_func(network, **network_kwargs)
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space = env.observation_space
def make_obs_ph(name):
return ObservationInput(observation_space, name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = total_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
td = checkpoint_path or td
model_file = os.path.join(td, "model")
model_saved = False
if tf.train.latest_checkpoint(td) is not None:
load_variables(model_file)
logger.log('Loaded model from {}'.format(model_file))
model_saved = True
elif load_path is not None:
load_variables(load_path)
logger.log('Loaded model from {}'.format(load_path))
for t in range(total_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
save_variables(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
load_variables(model_file)
return act | python | def learn(env,
network,
seed=None,
lr=5e-4,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
checkpoint_path=None,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
load_path=None,
**network_kwargs
):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
network: string or a function
neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
(mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
seed: int or None
PRNG seed. Runs with the same seed "should" give the same results. If None, no seeding is used.
lr: float
learning rate for adam optimizer
total_timesteps: int
number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of training, set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None, defaults to total_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
callback: (locals, globals) -> bool
function called at every step with the state of the algorithm.
If the callback returns True, training stops.
load_path: str
path to load the model from. (default: None)
**network_kwargs
additional keyword arguments to pass to the network builder.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = get_session()
set_global_seeds(seed)
q_func = build_q_func(network, **network_kwargs)
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space = env.observation_space
def make_obs_ph(name):
return ObservationInput(observation_space, name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = total_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
td = checkpoint_path or td
model_file = os.path.join(td, "model")
model_saved = False
if tf.train.latest_checkpoint(td) is not None:
load_variables(model_file)
logger.log('Loaded model from {}'.format(model_file))
model_saved = True
elif load_path is not None:
load_variables(load_path)
logger.log('Loaded model from {}'.format(load_path))
for t in range(total_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
save_variables(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
load_variables(model_file)
return act | [
"def",
"learn",
"(",
"env",
",",
"network",
",",
"seed",
"=",
"None",
",",
"lr",
"=",
"5e-4",
",",
"total_timesteps",
"=",
"100000",
",",
"buffer_size",
"=",
"50000",
",",
"exploration_fraction",
"=",
"0.1",
",",
"exploration_final_eps",
"=",
"0.02",
",",
"train_freq",
"=",
"1",
",",
"batch_size",
"=",
"32",
",",
"print_freq",
"=",
"100",
",",
"checkpoint_freq",
"=",
"10000",
",",
"checkpoint_path",
"=",
"None",
",",
"learning_starts",
"=",
"1000",
",",
"gamma",
"=",
"1.0",
",",
"target_network_update_freq",
"=",
"500",
",",
"prioritized_replay",
"=",
"False",
",",
"prioritized_replay_alpha",
"=",
"0.6",
",",
"prioritized_replay_beta0",
"=",
"0.4",
",",
"prioritized_replay_beta_iters",
"=",
"None",
",",
"prioritized_replay_eps",
"=",
"1e-6",
",",
"param_noise",
"=",
"False",
",",
"callback",
"=",
"None",
",",
"load_path",
"=",
"None",
",",
"*",
"*",
"network_kwargs",
")",
":",
"# Create all the functions necessary to train the model",
"sess",
"=",
"get_session",
"(",
")",
"set_global_seeds",
"(",
"seed",
")",
"q_func",
"=",
"build_q_func",
"(",
"network",
",",
"*",
"*",
"network_kwargs",
")",
"# capture the shape outside the closure so that the env object is not serialized",
"# by cloudpickle when serializing make_obs_ph",
"observation_space",
"=",
"env",
".",
"observation_space",
"def",
"make_obs_ph",
"(",
"name",
")",
":",
"return",
"ObservationInput",
"(",
"observation_space",
",",
"name",
"=",
"name",
")",
"act",
",",
"train",
",",
"update_target",
",",
"debug",
"=",
"deepq",
".",
"build_train",
"(",
"make_obs_ph",
"=",
"make_obs_ph",
",",
"q_func",
"=",
"q_func",
",",
"num_actions",
"=",
"env",
".",
"action_space",
".",
"n",
",",
"optimizer",
"=",
"tf",
".",
"train",
".",
"AdamOptimizer",
"(",
"learning_rate",
"=",
"lr",
")",
",",
"gamma",
"=",
"gamma",
",",
"grad_norm_clipping",
"=",
"10",
",",
"param_noise",
"=",
"param_noise",
")",
"act_params",
"=",
"{",
"'make_obs_ph'",
":",
"make_obs_ph",
",",
"'q_func'",
":",
"q_func",
",",
"'num_actions'",
":",
"env",
".",
"action_space",
".",
"n",
",",
"}",
"act",
"=",
"ActWrapper",
"(",
"act",
",",
"act_params",
")",
"# Create the replay buffer",
"if",
"prioritized_replay",
":",
"replay_buffer",
"=",
"PrioritizedReplayBuffer",
"(",
"buffer_size",
",",
"alpha",
"=",
"prioritized_replay_alpha",
")",
"if",
"prioritized_replay_beta_iters",
"is",
"None",
":",
"prioritized_replay_beta_iters",
"=",
"total_timesteps",
"beta_schedule",
"=",
"LinearSchedule",
"(",
"prioritized_replay_beta_iters",
",",
"initial_p",
"=",
"prioritized_replay_beta0",
",",
"final_p",
"=",
"1.0",
")",
"else",
":",
"replay_buffer",
"=",
"ReplayBuffer",
"(",
"buffer_size",
")",
"beta_schedule",
"=",
"None",
"# Create the schedule for exploration starting from 1.",
"exploration",
"=",
"LinearSchedule",
"(",
"schedule_timesteps",
"=",
"int",
"(",
"exploration_fraction",
"*",
"total_timesteps",
")",
",",
"initial_p",
"=",
"1.0",
",",
"final_p",
"=",
"exploration_final_eps",
")",
"# Initialize the parameters and copy them to the target network.",
"U",
".",
"initialize",
"(",
")",
"update_target",
"(",
")",
"episode_rewards",
"=",
"[",
"0.0",
"]",
"saved_mean_reward",
"=",
"None",
"obs",
"=",
"env",
".",
"reset",
"(",
")",
"reset",
"=",
"True",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
")",
"as",
"td",
":",
"td",
"=",
"checkpoint_path",
"or",
"td",
"model_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"td",
",",
"\"model\"",
")",
"model_saved",
"=",
"False",
"if",
"tf",
".",
"train",
".",
"latest_checkpoint",
"(",
"td",
")",
"is",
"not",
"None",
":",
"load_variables",
"(",
"model_file",
")",
"logger",
".",
"log",
"(",
"'Loaded model from {}'",
".",
"format",
"(",
"model_file",
")",
")",
"model_saved",
"=",
"True",
"elif",
"load_path",
"is",
"not",
"None",
":",
"load_variables",
"(",
"load_path",
")",
"logger",
".",
"log",
"(",
"'Loaded model from {}'",
".",
"format",
"(",
"load_path",
")",
")",
"for",
"t",
"in",
"range",
"(",
"total_timesteps",
")",
":",
"if",
"callback",
"is",
"not",
"None",
":",
"if",
"callback",
"(",
"locals",
"(",
")",
",",
"globals",
"(",
")",
")",
":",
"break",
"# Take action and update exploration to the newest value",
"kwargs",
"=",
"{",
"}",
"if",
"not",
"param_noise",
":",
"update_eps",
"=",
"exploration",
".",
"value",
"(",
"t",
")",
"update_param_noise_threshold",
"=",
"0.",
"else",
":",
"update_eps",
"=",
"0.",
"# Compute the threshold such that the KL divergence between perturbed and non-perturbed",
"# policy is comparable to eps-greedy exploration with eps = exploration.value(t).",
"# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017",
"# for detailed explanation.",
"update_param_noise_threshold",
"=",
"-",
"np",
".",
"log",
"(",
"1.",
"-",
"exploration",
".",
"value",
"(",
"t",
")",
"+",
"exploration",
".",
"value",
"(",
"t",
")",
"/",
"float",
"(",
"env",
".",
"action_space",
".",
"n",
")",
")",
"kwargs",
"[",
"'reset'",
"]",
"=",
"reset",
"kwargs",
"[",
"'update_param_noise_threshold'",
"]",
"=",
"update_param_noise_threshold",
"kwargs",
"[",
"'update_param_noise_scale'",
"]",
"=",
"True",
"action",
"=",
"act",
"(",
"np",
".",
"array",
"(",
"obs",
")",
"[",
"None",
"]",
",",
"update_eps",
"=",
"update_eps",
",",
"*",
"*",
"kwargs",
")",
"[",
"0",
"]",
"env_action",
"=",
"action",
"reset",
"=",
"False",
"new_obs",
",",
"rew",
",",
"done",
",",
"_",
"=",
"env",
".",
"step",
"(",
"env_action",
")",
"# Store transition in the replay buffer.",
"replay_buffer",
".",
"add",
"(",
"obs",
",",
"action",
",",
"rew",
",",
"new_obs",
",",
"float",
"(",
"done",
")",
")",
"obs",
"=",
"new_obs",
"episode_rewards",
"[",
"-",
"1",
"]",
"+=",
"rew",
"if",
"done",
":",
"obs",
"=",
"env",
".",
"reset",
"(",
")",
"episode_rewards",
".",
"append",
"(",
"0.0",
")",
"reset",
"=",
"True",
"if",
"t",
">",
"learning_starts",
"and",
"t",
"%",
"train_freq",
"==",
"0",
":",
"# Minimize the error in Bellman's equation on a batch sampled from replay buffer.",
"if",
"prioritized_replay",
":",
"experience",
"=",
"replay_buffer",
".",
"sample",
"(",
"batch_size",
",",
"beta",
"=",
"beta_schedule",
".",
"value",
"(",
"t",
")",
")",
"(",
"obses_t",
",",
"actions",
",",
"rewards",
",",
"obses_tp1",
",",
"dones",
",",
"weights",
",",
"batch_idxes",
")",
"=",
"experience",
"else",
":",
"obses_t",
",",
"actions",
",",
"rewards",
",",
"obses_tp1",
",",
"dones",
"=",
"replay_buffer",
".",
"sample",
"(",
"batch_size",
")",
"weights",
",",
"batch_idxes",
"=",
"np",
".",
"ones_like",
"(",
"rewards",
")",
",",
"None",
"td_errors",
"=",
"train",
"(",
"obses_t",
",",
"actions",
",",
"rewards",
",",
"obses_tp1",
",",
"dones",
",",
"weights",
")",
"if",
"prioritized_replay",
":",
"new_priorities",
"=",
"np",
".",
"abs",
"(",
"td_errors",
")",
"+",
"prioritized_replay_eps",
"replay_buffer",
".",
"update_priorities",
"(",
"batch_idxes",
",",
"new_priorities",
")",
"if",
"t",
">",
"learning_starts",
"and",
"t",
"%",
"target_network_update_freq",
"==",
"0",
":",
"# Update target network periodically.",
"update_target",
"(",
")",
"mean_100ep_reward",
"=",
"round",
"(",
"np",
".",
"mean",
"(",
"episode_rewards",
"[",
"-",
"101",
":",
"-",
"1",
"]",
")",
",",
"1",
")",
"num_episodes",
"=",
"len",
"(",
"episode_rewards",
")",
"if",
"done",
"and",
"print_freq",
"is",
"not",
"None",
"and",
"len",
"(",
"episode_rewards",
")",
"%",
"print_freq",
"==",
"0",
":",
"logger",
".",
"record_tabular",
"(",
"\"steps\"",
",",
"t",
")",
"logger",
".",
"record_tabular",
"(",
"\"episodes\"",
",",
"num_episodes",
")",
"logger",
".",
"record_tabular",
"(",
"\"mean 100 episode reward\"",
",",
"mean_100ep_reward",
")",
"logger",
".",
"record_tabular",
"(",
"\"% time spent exploring\"",
",",
"int",
"(",
"100",
"*",
"exploration",
".",
"value",
"(",
"t",
")",
")",
")",
"logger",
".",
"dump_tabular",
"(",
")",
"if",
"(",
"checkpoint_freq",
"is",
"not",
"None",
"and",
"t",
">",
"learning_starts",
"and",
"num_episodes",
">",
"100",
"and",
"t",
"%",
"checkpoint_freq",
"==",
"0",
")",
":",
"if",
"saved_mean_reward",
"is",
"None",
"or",
"mean_100ep_reward",
">",
"saved_mean_reward",
":",
"if",
"print_freq",
"is",
"not",
"None",
":",
"logger",
".",
"log",
"(",
"\"Saving model due to mean reward increase: {} -> {}\"",
".",
"format",
"(",
"saved_mean_reward",
",",
"mean_100ep_reward",
")",
")",
"save_variables",
"(",
"model_file",
")",
"model_saved",
"=",
"True",
"saved_mean_reward",
"=",
"mean_100ep_reward",
"if",
"model_saved",
":",
"if",
"print_freq",
"is",
"not",
"None",
":",
"logger",
".",
"log",
"(",
"\"Restored model with mean reward: {}\"",
".",
"format",
"(",
"saved_mean_reward",
")",
")",
"load_variables",
"(",
"model_file",
")",
"return",
"act"
] | Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
network: string or a function
neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
(mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
seed: int or None
PRNG seed. Runs with the same seed "should" give the same results. If None, no seeding is used.
lr: float
learning rate for adam optimizer
total_timesteps: int
number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of training, set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None, defaults to total_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
callback: (locals, globals) -> bool
function called at every step with the state of the algorithm.
If the callback returns True, training stops.
load_path: str
path to load the model from. (default: None)
**network_kwargs
additional keyword arguments to pass to the network builder.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function. | [
"Train",
"a",
"deepq",
"model",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/deepq.py#L95-L333 | valid |
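A minimal usage sketch for the `learn` entry point in the row above. This is an illustration, not part of the dataset row: the environment id, hyperparameters, and save path are assumptions, and `act.save` is used on the understanding that the returned `ActWrapper` exposes a save helper (see the `save_act` row below).

```python
# Sketch only: train DQN on CartPole with the registered 'mlp' network.
# Assumes gym and baselines are importable; all hyperparameters are illustrative.
import gym
from baselines import deepq

env = gym.make("CartPole-v0")
act = deepq.learn(
    env,
    network="mlp",            # one of the registered models: mlp, cnn, conv_only
    lr=1e-3,
    total_timesteps=100000,
    exploration_fraction=0.1,
    exploration_final_eps=0.02,
    print_freq=10,
)
act.save("cartpole_model.pkl")  # assumed ActWrapper save helper
```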
openai/baselines | baselines/deepq/deepq.py | ActWrapper.save_act | def save_act(self, path=None):
"""Save model to a pickle located at `path`"""
if path is None:
path = os.path.join(logger.get_dir(), "model.pkl")
with tempfile.TemporaryDirectory() as td:
save_variables(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
cloudpickle.dump((model_data, self._act_params), f) | python | def save_act(self, path=None):
"""Save model to a pickle located at `path`"""
if path is None:
path = os.path.join(logger.get_dir(), "model.pkl")
with tempfile.TemporaryDirectory() as td:
save_variables(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
cloudpickle.dump((model_data, self._act_params), f) | [
"def",
"save_act",
"(",
"self",
",",
"path",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"logger",
".",
"get_dir",
"(",
")",
",",
"\"model.pkl\"",
")",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
")",
"as",
"td",
":",
"save_variables",
"(",
"os",
".",
"path",
".",
"join",
"(",
"td",
",",
"\"model\"",
")",
")",
"arc_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"td",
",",
"\"packed.zip\"",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"arc_name",
",",
"'w'",
")",
"as",
"zipf",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"td",
")",
":",
"for",
"fname",
"in",
"files",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"fname",
")",
"if",
"file_path",
"!=",
"arc_name",
":",
"zipf",
".",
"write",
"(",
"file_path",
",",
"os",
".",
"path",
".",
"relpath",
"(",
"file_path",
",",
"td",
")",
")",
"with",
"open",
"(",
"arc_name",
",",
"\"rb\"",
")",
"as",
"f",
":",
"model_data",
"=",
"f",
".",
"read",
"(",
")",
"with",
"open",
"(",
"path",
",",
"\"wb\"",
")",
"as",
"f",
":",
"cloudpickle",
".",
"dump",
"(",
"(",
"model_data",
",",
"self",
".",
"_act_params",
")",
",",
"f",
")"
] | Save model to a pickle located at `path` | [
"Save",
"model",
"to",
"a",
"pickle",
"located",
"at",
"path"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/deepq.py#L55-L72 | valid |
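A short round-trip sketch for `save_act`. The module-level `deepq.load_act` used here is an assumption based on the pickle format the method writes (zipped variables plus `act_params`); the path is illustrative.

```python
# Sketch: serialize an ActWrapper (weights + act_params in one pickle), then
# rebuild it elsewhere. `act` is an ActWrapper returned by deepq.learn above.
from baselines import deepq

act.save_act("/tmp/deepq_model.pkl")           # writes the zipped variables + params
act2 = deepq.load_act("/tmp/deepq_model.pkl")  # assumed loader counterpart
```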
openai/baselines | baselines/common/models.py | nature_cnn | def nature_cnn(unscaled_images, **conv_kwargs):
"""
CNN from Nature paper.
"""
scaled_images = tf.cast(unscaled_images, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),
**conv_kwargs))
h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
h3 = conv_to_fc(h3)
return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))) | python | def nature_cnn(unscaled_images, **conv_kwargs):
"""
CNN from Nature paper.
"""
scaled_images = tf.cast(unscaled_images, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),
**conv_kwargs))
h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
h3 = conv_to_fc(h3)
return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))) | [
"def",
"nature_cnn",
"(",
"unscaled_images",
",",
"*",
"*",
"conv_kwargs",
")",
":",
"scaled_images",
"=",
"tf",
".",
"cast",
"(",
"unscaled_images",
",",
"tf",
".",
"float32",
")",
"/",
"255.",
"activ",
"=",
"tf",
".",
"nn",
".",
"relu",
"h",
"=",
"activ",
"(",
"conv",
"(",
"scaled_images",
",",
"'c1'",
",",
"nf",
"=",
"32",
",",
"rf",
"=",
"8",
",",
"stride",
"=",
"4",
",",
"init_scale",
"=",
"np",
".",
"sqrt",
"(",
"2",
")",
",",
"*",
"*",
"conv_kwargs",
")",
")",
"h2",
"=",
"activ",
"(",
"conv",
"(",
"h",
",",
"'c2'",
",",
"nf",
"=",
"64",
",",
"rf",
"=",
"4",
",",
"stride",
"=",
"2",
",",
"init_scale",
"=",
"np",
".",
"sqrt",
"(",
"2",
")",
",",
"*",
"*",
"conv_kwargs",
")",
")",
"h3",
"=",
"activ",
"(",
"conv",
"(",
"h2",
",",
"'c3'",
",",
"nf",
"=",
"64",
",",
"rf",
"=",
"3",
",",
"stride",
"=",
"1",
",",
"init_scale",
"=",
"np",
".",
"sqrt",
"(",
"2",
")",
",",
"*",
"*",
"conv_kwargs",
")",
")",
"h3",
"=",
"conv_to_fc",
"(",
"h3",
")",
"return",
"activ",
"(",
"fc",
"(",
"h3",
",",
"'fc1'",
",",
"nh",
"=",
"512",
",",
"init_scale",
"=",
"np",
".",
"sqrt",
"(",
"2",
")",
")",
")"
] | CNN from Nature paper. | [
"CNN",
"from",
"Nature",
"paper",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L16-L27 | valid |
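A shape-level sketch of `nature_cnn` in TF1 graph mode. The 84x84x4 frame stack is the conventional Atari preprocessing and is an assumption here, not something this row specifies.

```python
# Sketch: nature_cnn maps a batch of uint8 frames to a 512-unit latent vector.
import tensorflow as tf
from baselines.common.models import nature_cnn

X = tf.placeholder(tf.uint8, [None, 84, 84, 4])  # stacked frames (assumed shape)
latent = nature_cnn(X)  # cast to float and scaled internally -> [None, 512]
```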
openai/baselines | baselines/common/models.py | mlp | def mlp(num_layers=2, num_hidden=64, activation=tf.tanh, layer_norm=False):
"""
Stack of fully-connected layers to be used in a policy / q-function approximator
Parameters:
----------
num_layers: int number of fully-connected layers (default: 2)
num_hidden: int size of fully-connected layers (default: 64)
activation: activation function (default: tf.tanh)
Returns:
-------
function that builds fully connected network with a given input tensor / placeholder
"""
def network_fn(X):
h = tf.layers.flatten(X)
for i in range(num_layers):
h = fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2))
if layer_norm:
h = tf.contrib.layers.layer_norm(h, center=True, scale=True)
h = activation(h)
return h
return network_fn | python | def mlp(num_layers=2, num_hidden=64, activation=tf.tanh, layer_norm=False):
"""
Stack of fully-connected layers to be used in a policy / q-function approximator
Parameters:
----------
num_layers: int number of fully-connected layers (default: 2)
num_hidden: int size of fully-connected layers (default: 64)
activation: activation function (default: tf.tanh)
Returns:
-------
function that builds fully connected network with a given input tensor / placeholder
"""
def network_fn(X):
h = tf.layers.flatten(X)
for i in range(num_layers):
h = fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2))
if layer_norm:
h = tf.contrib.layers.layer_norm(h, center=True, scale=True)
h = activation(h)
return h
return network_fn | [
"def",
"mlp",
"(",
"num_layers",
"=",
"2",
",",
"num_hidden",
"=",
"64",
",",
"activation",
"=",
"tf",
".",
"tanh",
",",
"layer_norm",
"=",
"False",
")",
":",
"def",
"network_fn",
"(",
"X",
")",
":",
"h",
"=",
"tf",
".",
"layers",
".",
"flatten",
"(",
"X",
")",
"for",
"i",
"in",
"range",
"(",
"num_layers",
")",
":",
"h",
"=",
"fc",
"(",
"h",
",",
"'mlp_fc{}'",
".",
"format",
"(",
"i",
")",
",",
"nh",
"=",
"num_hidden",
",",
"init_scale",
"=",
"np",
".",
"sqrt",
"(",
"2",
")",
")",
"if",
"layer_norm",
":",
"h",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"layer_norm",
"(",
"h",
",",
"center",
"=",
"True",
",",
"scale",
"=",
"True",
")",
"h",
"=",
"activation",
"(",
"h",
")",
"return",
"h",
"return",
"network_fn"
] | Stack of fully-connected layers to be used in a policy / q-function approximator
Parameters:
----------
num_layers: int number of fully-connected layers (default: 2)
num_hidden: int size of fully-connected layers (default: 64)
activation: activation function (default: tf.tanh)
Returns:
-------
function that builds fully connected network with a given input tensor / placeholder | [
"Stack",
"of",
"fully",
"-",
"connected",
"layers",
"to",
"be",
"used",
"in",
"a",
"policy",
"/",
"q",
"-",
"function",
"approximator"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L31-L59 | valid |
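A sketch of the `mlp` builder above; the observation size and layer sizes are illustrative.

```python
# Sketch: build a 3-layer ReLU MLP network function and apply it to a placeholder.
import tensorflow as tf
from baselines.common.models import mlp

network_fn = mlp(num_layers=3, num_hidden=128, activation=tf.nn.relu)
X = tf.placeholder(tf.float32, [None, 17])  # e.g. a flat observation vector (assumed)
latent = network_fn(X)                      # -> [None, 128] tensor
```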
openai/baselines | baselines/common/models.py | lstm | def lstm(nlstm=128, layer_norm=False):
"""
Builds LSTM (Long Short-Term Memory) network to be used in a policy.
Note that the resulting function returns not only the output of the LSTM
(i.e. hidden state of lstm for each step in the sequence), but also a dictionary
with auxiliary tensors to be set as policy attributes.
Specifically,
S is a placeholder to feed current state (LSTM state has to be managed outside policy)
M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too)
initial_state is a numpy array containing initial lstm state (usually zeros)
state is the output LSTM state (to be fed into S at the next call)
An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example
Parameters:
----------
nlstm: int LSTM hidden state size
layer_norm: bool if True, layer-normalized version of LSTM is used
Returns:
-------
function that builds LSTM with a given input tensor / placeholder
"""
def network_fn(X, nenv=1):
nbatch = X.shape[0]
nsteps = nbatch // nenv
h = tf.layers.flatten(X)
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states
xs = batch_to_seq(h, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
if layer_norm:
h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm)
else:
h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm)
h = seq_to_batch(h5)
initial_state = np.zeros(S.shape.as_list(), dtype=float)
return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}
return network_fn | python | def lstm(nlstm=128, layer_norm=False):
"""
Builds LSTM (Long Short-Term Memory) network to be used in a policy.
Note that the resulting function returns not only the output of the LSTM
(i.e. hidden state of lstm for each step in the sequence), but also a dictionary
with auxiliary tensors to be set as policy attributes.
Specifically,
S is a placeholder to feed current state (LSTM state has to be managed outside policy)
M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too)
initial_state is a numpy array containing initial lstm state (usually zeros)
state is the output LSTM state (to be fed into S at the next call)
An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example
Parameters:
----------
nlstm: int LSTM hidden state size
layer_norm: bool if True, layer-normalized version of LSTM is used
Returns:
-------
function that builds LSTM with a given input tensor / placeholder
"""
def network_fn(X, nenv=1):
nbatch = X.shape[0]
nsteps = nbatch // nenv
h = tf.layers.flatten(X)
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states
xs = batch_to_seq(h, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
if layer_norm:
h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm)
else:
h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm)
h = seq_to_batch(h5)
initial_state = np.zeros(S.shape.as_list(), dtype=float)
return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}
return network_fn | [
"def",
"lstm",
"(",
"nlstm",
"=",
"128",
",",
"layer_norm",
"=",
"False",
")",
":",
"def",
"network_fn",
"(",
"X",
",",
"nenv",
"=",
"1",
")",
":",
"nbatch",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"nsteps",
"=",
"nbatch",
"//",
"nenv",
"h",
"=",
"tf",
".",
"layers",
".",
"flatten",
"(",
"X",
")",
"M",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"[",
"nbatch",
"]",
")",
"#mask (done t-1)",
"S",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"[",
"nenv",
",",
"2",
"*",
"nlstm",
"]",
")",
"#states",
"xs",
"=",
"batch_to_seq",
"(",
"h",
",",
"nenv",
",",
"nsteps",
")",
"ms",
"=",
"batch_to_seq",
"(",
"M",
",",
"nenv",
",",
"nsteps",
")",
"if",
"layer_norm",
":",
"h5",
",",
"snew",
"=",
"utils",
".",
"lnlstm",
"(",
"xs",
",",
"ms",
",",
"S",
",",
"scope",
"=",
"'lnlstm'",
",",
"nh",
"=",
"nlstm",
")",
"else",
":",
"h5",
",",
"snew",
"=",
"utils",
".",
"lstm",
"(",
"xs",
",",
"ms",
",",
"S",
",",
"scope",
"=",
"'lstm'",
",",
"nh",
"=",
"nlstm",
")",
"h",
"=",
"seq_to_batch",
"(",
"h5",
")",
"initial_state",
"=",
"np",
".",
"zeros",
"(",
"S",
".",
"shape",
".",
"as_list",
"(",
")",
",",
"dtype",
"=",
"float",
")",
"return",
"h",
",",
"{",
"'S'",
":",
"S",
",",
"'M'",
":",
"M",
",",
"'state'",
":",
"snew",
",",
"'initial_state'",
":",
"initial_state",
"}",
"return",
"network_fn"
] | Builds LSTM (Long Short-Term Memory) network to be used in a policy.
Note that the resulting function returns not only the output of the LSTM
(i.e. hidden state of lstm for each step in the sequence), but also a dictionary
with auxiliary tensors to be set as policy attributes.
Specifically,
S is a placeholder to feed current state (LSTM state has to be managed outside policy)
M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too)
initial_state is a numpy array containing initial lstm state (usually zeros)
state is the output LSTM state (to be fed into S at the next call)
An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example
Parameters:
----------
nlstm: int LSTM hidden state size
layer_norm: bool if True, layer-normalized version of LSTM is used
Returns:
-------
function that builds LSTM with a given input tensor / placeholder | [
"Builds",
"LSTM",
"(",
"Long",
"-",
"Short",
"Term",
"Memory",
")",
"network",
"to",
"be",
"used",
"in",
"a",
"policy",
".",
"Note",
"that",
"the",
"resulting",
"function",
"returns",
"not",
"only",
"the",
"output",
"of",
"the",
"LSTM",
"(",
"i",
".",
"e",
".",
"hidden",
"state",
"of",
"lstm",
"for",
"each",
"step",
"in",
"the",
"sequence",
")",
"but",
"also",
"a",
"dictionary",
"with",
"auxiliary",
"tensors",
"to",
"be",
"set",
"as",
"policy",
"attributes",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L84-L135 | valid |
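A sketch of the recurrent plumbing the `lstm` builder returns; the batch, feature, and env counts are illustrative.

```python
# Sketch: lstm's network_fn returns the output plus placeholders the policy must feed.
import tensorflow as tf
from baselines.common.models import lstm

network_fn = lstm(nlstm=64)
X = tf.placeholder(tf.float32, [8, 10])  # nbatch=8 flattened observations (assumed)
h, extra = network_fn(X, nenv=4)         # nsteps = nbatch // nenv = 2
# extra['S'] (state) and extra['M'] (done mask) must be fed on every call;
# extra['initial_state'] is the zero array used to seed extra['S'].
```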
openai/baselines | baselines/common/models.py | conv_only | def conv_only(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **conv_kwargs):
'''
convolutions-only net
Parameters:
----------
convs: list of triples (filter_number, filter_size, stride) specifying parameters for each layer.
Returns:
function that takes tensorflow tensor as input and returns the output of the last convolutional layer
'''
def network_fn(X):
out = tf.cast(X, tf.float32) / 255.
with tf.variable_scope("convnet"):
for num_outputs, kernel_size, stride in convs:
out = layers.convolution2d(out,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
activation_fn=tf.nn.relu,
**conv_kwargs)
return out
return network_fn | python | def conv_only(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **conv_kwargs):
'''
convolutions-only net
Parameters:
----------
convs: list of triples (filter_number, filter_size, stride) specifying parameters for each layer.
Returns:
function that takes tensorflow tensor as input and returns the output of the last convolutional layer
'''
def network_fn(X):
out = tf.cast(X, tf.float32) / 255.
with tf.variable_scope("convnet"):
for num_outputs, kernel_size, stride in convs:
out = layers.convolution2d(out,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
activation_fn=tf.nn.relu,
**conv_kwargs)
return out
return network_fn | [
"def",
"conv_only",
"(",
"convs",
"=",
"[",
"(",
"32",
",",
"8",
",",
"4",
")",
",",
"(",
"64",
",",
"4",
",",
"2",
")",
",",
"(",
"64",
",",
"3",
",",
"1",
")",
"]",
",",
"*",
"*",
"conv_kwargs",
")",
":",
"def",
"network_fn",
"(",
"X",
")",
":",
"out",
"=",
"tf",
".",
"cast",
"(",
"X",
",",
"tf",
".",
"float32",
")",
"/",
"255.",
"with",
"tf",
".",
"variable_scope",
"(",
"\"convnet\"",
")",
":",
"for",
"num_outputs",
",",
"kernel_size",
",",
"stride",
"in",
"convs",
":",
"out",
"=",
"layers",
".",
"convolution2d",
"(",
"out",
",",
"num_outputs",
"=",
"num_outputs",
",",
"kernel_size",
"=",
"kernel_size",
",",
"stride",
"=",
"stride",
",",
"activation_fn",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"*",
"*",
"conv_kwargs",
")",
"return",
"out",
"return",
"network_fn"
] | convolutions-only net
Parameters:
----------
convs: list of triples (filter_number, filter_size, stride) specifying parameters for each layer.
Returns:
function that takes tensorflow tensor as input and returns the output of the last convolutional layer | [
"convolutions",
"-",
"only",
"net"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L171-L198 | valid |
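A sketch of `conv_only` with a custom layer spec; the input shape is illustrative.

```python
# Sketch: a two-layer convolutional feature extractor built with conv_only.
import tensorflow as tf
from baselines.common.models import conv_only

network_fn = conv_only(convs=[(16, 8, 4), (32, 4, 2)])  # (num_outputs, kernel, stride)
X = tf.placeholder(tf.uint8, [None, 84, 84, 4])         # assumed frame shape
out = network_fn(X)  # output of the last conv layer, still spatial (NHWC)
```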
openai/baselines | baselines/common/models.py | get_network_builder | def get_network_builder(name):
"""
If you want to register your own network outside models.py, you just need:
Usage Example:
-------------
from baselines.common.models import register
@register("your_network_name")
def your_network_define(**net_kwargs):
...
return network_fn
"""
if callable(name):
return name
elif name in mapping:
return mapping[name]
else:
raise ValueError('Unknown network type: {}'.format(name)) | python | def get_network_builder(name):
"""
If you want to register your own network outside models.py, you just need:
Usage Example:
-------------
from baselines.common.models import register
@register("your_network_name")
def your_network_define(**net_kwargs):
...
return network_fn
"""
if callable(name):
return name
elif name in mapping:
return mapping[name]
else:
raise ValueError('Unknown network type: {}'.format(name)) | [
"def",
"get_network_builder",
"(",
"name",
")",
":",
"if",
"callable",
"(",
"name",
")",
":",
"return",
"name",
"elif",
"name",
"in",
"mapping",
":",
"return",
"mapping",
"[",
"name",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown network type: {}'",
".",
"format",
"(",
"name",
")",
")"
] | If you want to register your own network outside models.py, you just need:
Usage Example:
-------------
from baselines.common.models import register
@register("your_network_name")
def your_network_define(**net_kwargs):
...
return network_fn | [
"If",
"you",
"want",
"to",
"register",
"your",
"own",
"network",
"outside",
"models",
".",
"py",
"you",
"just",
"need",
":"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L206-L224 | valid |
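The docstring above sketches the registration pattern; spelled out end to end it might look like this. The network name and body are hypothetical.

```python
# Sketch: register a custom network by name, then resolve it via get_network_builder.
import tensorflow as tf
from baselines.common.models import register, get_network_builder

@register("tiny_net")  # hypothetical name
def tiny_net(**net_kwargs):
    def network_fn(X):
        return tf.layers.flatten(X)  # trivially pass observations through
    return network_fn

builder = get_network_builder("tiny_net")    # returns the registered builder
same_fn = get_network_builder(tiny_net)      # callables are passed through unchanged
```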
openai/baselines | baselines/deepq/models.py | mlp | def mlp(hiddens=[], layer_norm=False):
"""This model takes as input an observation and returns values of all actions.
Parameters
----------
hiddens: [int]
list of sizes of hidden layers
layer_norm: bool
if true, applies layer normalization to every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm.
"""
return lambda *args, **kwargs: _mlp(hiddens, layer_norm=layer_norm, *args, **kwargs) | python | def mlp(hiddens=[], layer_norm=False):
"""This model takes as input an observation and returns values of all actions.
Parameters
----------
hiddens: [int]
list of sizes of hidden layers
layer_norm: bool
if true, applies layer normalization to every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm.
"""
return lambda *args, **kwargs: _mlp(hiddens, layer_norm=layer_norm, *args, **kwargs) | [
"def",
"mlp",
"(",
"hiddens",
"=",
"[",
"]",
",",
"layer_norm",
"=",
"False",
")",
":",
"return",
"lambda",
"*",
"args",
",",
"*",
"*",
"kwargs",
":",
"_mlp",
"(",
"hiddens",
",",
"layer_norm",
"=",
"layer_norm",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | This model takes as input an observation and returns values of all actions.
Parameters
----------
hiddens: [int]
list of sizes of hidden layers
layer_norm: bool
if true, applies layer normalization to every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm. | [
"This",
"model",
"takes",
"as",
"input",
"an",
"observation",
"and",
"returns",
"values",
"of",
"all",
"actions",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/models.py#L17-L33 | valid |
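A sketch of the factory pattern: `mlp` returns a q_func, which is what `deepq.build_train`/`learn` expect. The layer sizes are illustrative.

```python
# Sketch: build a deepq-style Q-function with two hidden layers of 64 units.
from baselines.deepq import models

q_func = models.mlp([64, 64], layer_norm=False)
# q_func is later invoked by build_train as q_func(obs_input, num_actions, scope, ...)
```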
openai/baselines | baselines/deepq/models.py | cnn_to_mlp | def cnn_to_mlp(convs, hiddens, dueling=False, layer_norm=False):
"""This model takes as input an observation and returns values of all actions.
Parameters
----------
convs: [(int, int, int)]
list of convolutional layers in form of
(num_outputs, kernel_size, stride)
hiddens: [int]
list of sizes of hidden layers
dueling: bool
if true, doubles the output MLP to compute a baseline
for action scores
layer_norm: bool
if true, applies layer normalization to every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm.
"""
return lambda *args, **kwargs: _cnn_to_mlp(convs, hiddens, dueling, layer_norm=layer_norm, *args, **kwargs) | python | def cnn_to_mlp(convs, hiddens, dueling=False, layer_norm=False):
"""This model takes as input an observation and returns values of all actions.
Parameters
----------
convs: [(int, int, int)]
list of convolutional layers in form of
(num_outputs, kernel_size, stride)
hiddens: [int]
list of sizes of hidden layers
dueling: bool
if true, doubles the output MLP to compute a baseline
for action scores
layer_norm: bool
if true, applies layer normalization to every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm.
"""
return lambda *args, **kwargs: _cnn_to_mlp(convs, hiddens, dueling, layer_norm=layer_norm, *args, **kwargs) | [
"def",
"cnn_to_mlp",
"(",
"convs",
",",
"hiddens",
",",
"dueling",
"=",
"False",
",",
"layer_norm",
"=",
"False",
")",
":",
"return",
"lambda",
"*",
"args",
",",
"*",
"*",
"kwargs",
":",
"_cnn_to_mlp",
"(",
"convs",
",",
"hiddens",
",",
"dueling",
",",
"layer_norm",
"=",
"layer_norm",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | This model takes as input an observation and returns values of all actions.
Parameters
----------
convs: [(int, int, int)]
list of convolutional layers in form of
(num_outputs, kernel_size, stride)
hiddens: [int]
list of sizes of hidden layers
dueling: bool
if true, doubles the output MLP to compute a baseline
for action scores
layer_norm: bool
if true, applies layer normalization to every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm. | [
"This",
"model",
"takes",
"as",
"input",
"an",
"observation",
"and",
"returns",
"values",
"of",
"all",
"actions",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/models.py#L73-L96 | valid |
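A sketch using the classic Atari DQN architecture; the values (32/64/64 filters, one 256-unit hidden layer) mirror that convention and are illustrative.

```python
# Sketch: the familiar DQN convnet head expressed via cnn_to_mlp, with dueling on.
from baselines.deepq import models

q_func = models.cnn_to_mlp(
    convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],  # (num_outputs, kernel_size, stride)
    hiddens=[256],
    dueling=True,  # adds a separate state-value stream alongside action scores
)
```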
openai/baselines | baselines/common/cmd_util.py | make_vec_env | def make_vec_env(env_id, env_type, num_env, seed,
wrapper_kwargs=None,
start_index=0,
reward_scale=1.0,
flatten_dict_observations=True,
gamestate=None):
"""
Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.
"""
wrapper_kwargs = wrapper_kwargs or {}
mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
seed = seed + 10000 * mpi_rank if seed is not None else None
logger_dir = logger.get_dir()
def make_thunk(rank):
return lambda: make_env(
env_id=env_id,
env_type=env_type,
mpi_rank=mpi_rank,
subrank=rank,
seed=seed,
reward_scale=reward_scale,
gamestate=gamestate,
flatten_dict_observations=flatten_dict_observations,
wrapper_kwargs=wrapper_kwargs,
logger_dir=logger_dir
)
set_global_seeds(seed)
if num_env > 1:
return SubprocVecEnv([make_thunk(i + start_index) for i in range(num_env)])
else:
return DummyVecEnv([make_thunk(start_index)]) | python | def make_vec_env(env_id, env_type, num_env, seed,
wrapper_kwargs=None,
start_index=0,
reward_scale=1.0,
flatten_dict_observations=True,
gamestate=None):
"""
Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.
"""
wrapper_kwargs = wrapper_kwargs or {}
mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
seed = seed + 10000 * mpi_rank if seed is not None else None
logger_dir = logger.get_dir()
def make_thunk(rank):
return lambda: make_env(
env_id=env_id,
env_type=env_type,
mpi_rank=mpi_rank,
subrank=rank,
seed=seed,
reward_scale=reward_scale,
gamestate=gamestate,
flatten_dict_observations=flatten_dict_observations,
wrapper_kwargs=wrapper_kwargs,
logger_dir=logger_dir
)
set_global_seeds(seed)
if num_env > 1:
return SubprocVecEnv([make_thunk(i + start_index) for i in range(num_env)])
else:
return DummyVecEnv([make_thunk(start_index)]) | [
"def",
"make_vec_env",
"(",
"env_id",
",",
"env_type",
",",
"num_env",
",",
"seed",
",",
"wrapper_kwargs",
"=",
"None",
",",
"start_index",
"=",
"0",
",",
"reward_scale",
"=",
"1.0",
",",
"flatten_dict_observations",
"=",
"True",
",",
"gamestate",
"=",
"None",
")",
":",
"wrapper_kwargs",
"=",
"wrapper_kwargs",
"or",
"{",
"}",
"mpi_rank",
"=",
"MPI",
".",
"COMM_WORLD",
".",
"Get_rank",
"(",
")",
"if",
"MPI",
"else",
"0",
"seed",
"=",
"seed",
"+",
"10000",
"*",
"mpi_rank",
"if",
"seed",
"is",
"not",
"None",
"else",
"None",
"logger_dir",
"=",
"logger",
".",
"get_dir",
"(",
")",
"def",
"make_thunk",
"(",
"rank",
")",
":",
"return",
"lambda",
":",
"make_env",
"(",
"env_id",
"=",
"env_id",
",",
"env_type",
"=",
"env_type",
",",
"mpi_rank",
"=",
"mpi_rank",
",",
"subrank",
"=",
"rank",
",",
"seed",
"=",
"seed",
",",
"reward_scale",
"=",
"reward_scale",
",",
"gamestate",
"=",
"gamestate",
",",
"flatten_dict_observations",
"=",
"flatten_dict_observations",
",",
"wrapper_kwargs",
"=",
"wrapper_kwargs",
",",
"logger_dir",
"=",
"logger_dir",
")",
"set_global_seeds",
"(",
"seed",
")",
"if",
"num_env",
">",
"1",
":",
"return",
"SubprocVecEnv",
"(",
"[",
"make_thunk",
"(",
"i",
"+",
"start_index",
")",
"for",
"i",
"in",
"range",
"(",
"num_env",
")",
"]",
")",
"else",
":",
"return",
"DummyVecEnv",
"(",
"[",
"make_thunk",
"(",
"start_index",
")",
"]",
")"
] | Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo. | [
"Create",
"a",
"wrapped",
"monitored",
"SubprocVecEnv",
"for",
"Atari",
"and",
"MuJoCo",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L21-L52 | valid |
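A sketch of `make_vec_env`; the environment id and worker count are illustrative, and mpi4py is optional here (the rank falls back to 0 without it).

```python
# Sketch: eight parallel Atari workers behind a SubprocVecEnv.
from baselines.common.cmd_util import make_vec_env

venv = make_vec_env("PongNoFrameskip-v4", "atari", num_env=8, seed=0)
obs = venv.reset()  # batched observations, one per worker
venv.close()
```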
openai/baselines | baselines/common/cmd_util.py | make_mujoco_env | def make_mujoco_env(env_id, seed, reward_scale=1.0):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
rank = MPI.COMM_WORLD.Get_rank()
myseed = seed + 1000 * rank if seed is not None else None
set_global_seeds(myseed)
env = gym.make(env_id)
logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))
env = Monitor(env, logger_path, allow_early_resets=True)
env.seed(seed)
if reward_scale != 1.0:
from baselines.common.retro_wrappers import RewardScaler
env = RewardScaler(env, reward_scale)
    return env | python | Create a wrapped, monitored gym.Env for MuJoCo. | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L88-L102 | valid |
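The per-rank seed offset above keeps parallel MPI workers on decorrelated random streams; a small sketch of that arithmetic (plain Python, no MPI required):

def worker_seed(base_seed, rank):
    # mirrors: myseed = seed + 1000 * rank if seed is not None else None
    return base_seed + 1000 * rank if base_seed is not None else None

assert worker_seed(42, 0) == 42
assert worker_seed(42, 3) == 3042    # ranks land on well-separated seeds
assert worker_seed(None, 3) is None  # unseeded runs stay unseeded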
openai/baselines | baselines/common/cmd_util.py | make_robotics_env | def make_robotics_env(env_id, seed, rank=0):
"""
    Create a wrapped, monitored gym.Env for robotics environments.
"""
set_global_seeds(seed)
env = gym.make(env_id)
env = FlattenDictWrapper(env, ['observation', 'desired_goal'])
env = Monitor(
env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)),
info_keywords=('is_success',))
env.seed(seed)
    return env | python | Create a wrapped, monitored gym.Env for robotics environments. | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L104-L115 | valid |
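A hedged usage sketch ('FetchReach-v1' is an illustrative env id); the key point is that FlattenDictWrapper turns the dict observation into a flat vector built from the 'observation' and 'desired_goal' entries:

from baselines.common.cmd_util import make_robotics_env

env = make_robotics_env('FetchReach-v1', seed=0, rank=0)
obs = env.reset()
# obs is a flat array: the dict observation's 'observation' and
# 'desired_goal' entries concatenated by FlattenDictWrapper.
print(obs.shape, env.observation_space.shape)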
openai/baselines | baselines/common/cmd_util.py | common_arg_parser | def common_arg_parser():
"""
    Create an argparse.ArgumentParser with the options shared by the baselines run scripts.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--env_type', help='type of environment, used when the environment type cannot be automatically determined', type=str)
parser.add_argument('--seed', help='RNG seed', type=int, default=None)
parser.add_argument('--alg', help='Algorithm', type=str, default='ppo2')
    parser.add_argument('--num_timesteps', type=float, default=1e6)
parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default=None)
parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None)
parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int)
parser.add_argument('--reward_scale', help='Reward scale factor. Default: 1.0', default=1.0, type=float)
parser.add_argument('--save_path', help='Path to save trained model to', default=None, type=str)
parser.add_argument('--save_video_interval', help='Save video every x steps (0 = disabled)', default=0, type=int)
parser.add_argument('--save_video_length', help='Length of recorded video. Default: 200', default=200, type=int)
parser.add_argument('--play', default=False, action='store_true')
    return parser | python | Create an argparse.ArgumentParser with the options shared by the baselines run scripts. | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L135-L153 | valid |
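A sketch of how a run script typically consumes this parser, splitting off algorithm-specific flags with parse_known_args (the extra '--lr' flag is illustrative):

parser = common_arg_parser()
args, unknown = parser.parse_known_args(
    ['--env=HalfCheetah-v2', '--alg=ppo2', '--num_timesteps=2e6', '--lr=1e-4'])
print(args.env, args.alg, int(args.num_timesteps))  # HalfCheetah-v2 ppo2 2000000
print(unknown)  # ['--lr=1e-4'], left for parse_unknown_args below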
openai/baselines | baselines/common/cmd_util.py | robotics_arg_parser | def robotics_arg_parser():
"""
    Create an argparse.ArgumentParser for the robotics experiments.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=None)
parser.add_argument('--num-timesteps', type=int, default=int(1e6))
    return parser | python | Create an argparse.ArgumentParser for the robotics experiments. | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L155-L163 | valid |
openai/baselines | baselines/common/cmd_util.py | parse_unknown_args | def parse_unknown_args(args):
"""
    Parse arguments not consumed by the main arg parser into a dictionary
"""
retval = {}
preceded_by_key = False
for arg in args:
if arg.startswith('--'):
if '=' in arg:
key = arg.split('=')[0][2:]
value = arg.split('=')[1]
retval[key] = value
else:
key = arg[2:]
preceded_by_key = True
elif preceded_by_key:
retval[key] = arg
preceded_by_key = False
    return retval | python | Parse arguments not consumed by the main arg parser into a dictionary | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L166-L185 | valid |
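A worked example of the two accepted spellings; note values are kept as strings, so type conversion is left to the caller:

extra = parse_unknown_args(['--lr=1e-4', '--num_layers', '3'])
assert extra == {'lr': '1e-4', 'num_layers': '3'}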
openai/baselines | baselines/common/vec_env/vec_env.py | clear_mpi_env_vars | @contextlib.contextmanager
def clear_mpi_env_vars():
    """
    from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang.
    This context manager is a hacky way to clear those environment variables temporarily, such as when we are starting multiprocessing Processes.
    """
removed_environment = {}
for k, v in list(os.environ.items()):
for prefix in ['OMPI_', 'PMI_']:
if k.startswith(prefix):
removed_environment[k] = v
del os.environ[k]
try:
yield
finally:
        os.environ.update(removed_environment) | python | from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang. This context manager is a hacky way to clear those environment variables temporarily, such as when we are starting multiprocessing Processes. | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/vec_env/vec_env.py#L204-L219 | valid |
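A hedged usage sketch: start a multiprocessing worker inside the context so it does not inherit OMPI_*/PMI_* variables, and mpi4py in the child will not mistake it for an MPI rank:

import multiprocessing as mp
import os

def child():
    assert not any(k.startswith(('OMPI_', 'PMI_')) for k in os.environ)

with clear_mpi_env_vars():
    p = mp.Process(target=child)
    p.start()   # environment snapshot is taken while the MPI vars are cleared
p.join()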
openai/baselines | baselines/ppo2/ppo2.py | learn | def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, load_path=None, model_fn=None, **network_kwargs):
'''
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See common/models.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
    log_interval: int number of updates between logging events
nminibatches: int number of training minibatches per update. For recurrent policies,
    should be less than or equal to the number of environments run in parallel.
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
    save_interval: int number of updates between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
policy = build_policy(env, network, **network_kwargs)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
# Instantiate the model object (that creates act_model and train_model)
if model_fn is None:
from baselines.ppo2.model import Model
model_fn = Model
model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
if eval_env is not None:
eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam)
epinfobuf = deque(maxlen=100)
if eval_env is not None:
eval_epinfobuf = deque(maxlen=100)
# Start total timer
tfirststart = time.perf_counter()
nupdates = total_timesteps//nbatch
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
# Start timer
tstart = time.perf_counter()
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
# Calculate the cliprange
cliprangenow = cliprange(frac)
# Get minibatch
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
if eval_env is not None:
eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632
epinfobuf.extend(epinfos)
if eval_env is not None:
eval_epinfobuf.extend(eval_epinfos)
# Here what we're going to do is for each minibatch calculate the loss and append it.
mblossvals = []
if states is None: # nonrecurrent version
# Index of each element of batch_size
# Create the indices array
inds = np.arange(nbatch)
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else: # recurrent version
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
# Feedforward --> get losses --> update
lossvals = np.mean(mblossvals, axis=0)
# End timer
tnow = time.perf_counter()
# Calculate the fps (frame per second)
fps = int(nbatch / (tnow - tstart))
if update % log_interval == 0 or update == 1:
            # Calculates if value function is a good predictor of the returns (ev > 1)
            # or if it's just worse than predicting nothing (ev <= 0)
ev = explained_variance(values, returns)
logger.logkv("serial_timesteps", update*nsteps)
logger.logkv("nupdates", update)
logger.logkv("total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
if eval_env is not None:
logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) )
logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) )
logger.logkv('time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv(lossname, lossval)
if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
logger.dumpkvs()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and (MPI is None or MPI.COMM_WORLD.Get_rank() == 0):
checkdir = osp.join(logger.get_dir(), 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, '%.5i'%update)
print('Saving to', savepath)
model.save(savepath)
    return model | python | Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See common/models.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
log_interval: int number of updates between logging events
nminibatches: int number of training minibatches per update. For recurrent policies,
should be less than or equal to the number of environments run in parallel.
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int number of updates between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers. | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/ppo2/ppo2.py#L21-L204 | valid |
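A minimal invocation sketch (assumptions: a Gym CartPole env wrapped in DummyVecEnv to satisfy the vectorized-env requirement; the hyperparameters are illustrative, not recommended settings):

import gym
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv

venv = DummyVecEnv([lambda: gym.make('CartPole-v1')])
model = learn(network='mlp', env=venv, total_timesteps=1e5,
              nsteps=128, nminibatches=4, log_interval=1)
# the returned model can then act: actions, values, _, neglogp = model.step(obs)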
openai/baselines | baselines/common/cg.py | cg | def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
"""
    Conjugate gradient: iteratively solve A x = b, with A supplied only through the matvec f_Ax (Demmel, Applied Numerical Linear Algebra, p. 312).
"""
p = b.copy()
r = b.copy()
x = np.zeros_like(b)
rdotr = r.dot(r)
fmtstr = "%10i %10.3g %10.3g"
titlestr = "%10s %10s %10s"
if verbose: print(titlestr % ("iter", "residual norm", "soln norm"))
for i in range(cg_iters):
if callback is not None:
callback(x)
if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x)))
z = f_Ax(p)
v = rdotr / p.dot(z)
x += v*p
r -= v*z
newrdotr = r.dot(r)
mu = newrdotr/rdotr
p = r + mu*p
rdotr = newrdotr
if rdotr < residual_tol:
break
if callback is not None:
callback(x)
if verbose: print(fmtstr % (i+1, rdotr, np.linalg.norm(x))) # pylint: disable=W0631
    return x | python | Conjugate gradient: iteratively solve A x = b, with A supplied only through the matvec f_Ax (Demmel, Applied Numerical Linear Algebra, p. 312). | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cg.py#L2-L34 | valid |
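A self-contained check of the routine on a random symmetric positive-definite system (a sketch; the matrix is supplied as a closure, as TRPO does with Fisher-vector products):

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(50, 50)
A = A.T @ A + 50 * np.eye(50)   # symmetric positive definite by construction
b = rng.randn(50)

x = cg(lambda p: A @ p, b, cg_iters=100, residual_tol=1e-12)
assert np.allclose(A @ x, b, atol=1e-5)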
openai/baselines | baselines/common/input.py | observation_placeholder | def observation_placeholder(ob_space, batch_size=None, name='Ob'):
'''
    Create a placeholder to feed observations into, of the size appropriate to the observation space
Parameters:
----------
ob_space: gym.Space observation space
batch_size: int size of the batch to be fed into input. Can be left None in most cases.
name: str name of the placeholder
Returns:
-------
tensorflow placeholder tensor
'''
assert isinstance(ob_space, Discrete) or isinstance(ob_space, Box) or isinstance(ob_space, MultiDiscrete), \
        'Can only deal with Discrete, Box, and MultiDiscrete observation spaces for now'
dtype = ob_space.dtype
if dtype == np.int8:
dtype = np.uint8
    return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=dtype, name=name) | python | Create a placeholder to feed observations into, of the size appropriate to the observation space
Parameters:
----------
ob_space: gym.Space observation space
batch_size: int size of the batch to be fed into input. Can be left None in most cases.
name: str name of the placeholder
Returns:
-------
tensorflow placeholder tensor | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/input.py#L5-L31 | valid |
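A shape sketch (assumes TF1-style graph mode, matching the code above):

import numpy as np
from gym.spaces import Box, Discrete

box_ph = observation_placeholder(Box(low=-1., high=1., shape=(17,), dtype=np.float32))
print(box_ph.shape)   # (?, 17): batch axis left as None by default
disc_ph = observation_placeholder(Discrete(5), batch_size=32)
print(disc_ph.shape)  # (32,): Discrete spaces have an empty shape tuple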
openai/baselines | baselines/common/input.py | observation_input | def observation_input(ob_space, batch_size=None, name='Ob'):
'''
    Create a placeholder to feed observations into, of the size appropriate to the observation space, and add an input
    encoder of the appropriate type.
'''
placeholder = observation_placeholder(ob_space, batch_size, name)
    return placeholder, encode_observation(ob_space, placeholder) | python | Create a placeholder to feed observations into, of the size appropriate to the observation space, and add an input encoder of the appropriate type. | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/input.py#L34-L41 | valid |
openai/baselines | baselines/common/input.py | encode_observation | def encode_observation(ob_space, placeholder):
'''
Encode input in the way that is appropriate to the observation space
Parameters:
----------
ob_space: gym.Space observation space
placeholder: tf.placeholder observation input placeholder
'''
if isinstance(ob_space, Discrete):
return tf.to_float(tf.one_hot(placeholder, ob_space.n))
elif isinstance(ob_space, Box):
return tf.to_float(placeholder)
elif isinstance(ob_space, MultiDiscrete):
placeholder = tf.cast(placeholder, tf.int32)
one_hots = [tf.to_float(tf.one_hot(placeholder[..., i], ob_space.nvec[i])) for i in range(placeholder.shape[-1])]
return tf.concat(one_hots, axis=-1)
else:
    raise NotImplementedError | python | Encode input in the way that is appropriate to the observation space
Parameters:
----------
ob_space: gym.Space observation space
placeholder: tf.placeholder observation input placeholder | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/input.py#L43-L63 | valid |
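A sketch of the MultiDiscrete branch: each sub-action gets its own one-hot and the pieces are concatenated along the last axis, so nvec = [3, 5] yields 3 + 5 = 8 columns:

from gym.spaces import MultiDiscrete

space = MultiDiscrete([3, 5])
ph = observation_placeholder(space)   # integer placeholder of shape (?, 2)
enc = encode_observation(space, ph)
print(enc.shape)                      # (?, 8): one-hots of width 3 and 5, concatenated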
openai/baselines | baselines/her/rollout.py | RolloutWorker.generate_rollouts | def generate_rollouts(self):
"""Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current
policy acting on it accordingly.
"""
self.reset_all_rollouts()
# compute observations
o = np.empty((self.rollout_batch_size, self.dims['o']), np.float32) # observations
ag = np.empty((self.rollout_batch_size, self.dims['g']), np.float32) # achieved goals
o[:] = self.initial_o
ag[:] = self.initial_ag
# generate episodes
obs, achieved_goals, acts, goals, successes = [], [], [], [], []
dones = []
info_values = [np.empty((self.T - 1, self.rollout_batch_size, self.dims['info_' + key]), np.float32) for key in self.info_keys]
Qs = []
for t in range(self.T):
policy_output = self.policy.get_actions(
o, ag, self.g,
compute_Q=self.compute_Q,
noise_eps=self.noise_eps if not self.exploit else 0.,
random_eps=self.random_eps if not self.exploit else 0.,
use_target_net=self.use_target_net)
if self.compute_Q:
u, Q = policy_output
Qs.append(Q)
else:
u = policy_output
if u.ndim == 1:
# The non-batched case should still have a reasonable shape.
u = u.reshape(1, -1)
o_new = np.empty((self.rollout_batch_size, self.dims['o']))
ag_new = np.empty((self.rollout_batch_size, self.dims['g']))
success = np.zeros(self.rollout_batch_size)
# compute new states and observations
obs_dict_new, _, done, info = self.venv.step(u)
o_new = obs_dict_new['observation']
ag_new = obs_dict_new['achieved_goal']
success = np.array([i.get('is_success', 0.0) for i in info])
if any(done):
            # here we assume all environments are done in ~the same number of steps, so we terminate rollouts whenever any of the envs returns done
            # the trick with using vecenvs is not to add the obs from the environments that are "done", because those are already observations
# after a reset
break
for i, info_dict in enumerate(info):
for idx, key in enumerate(self.info_keys):
                info_values[idx][t, i] = info_dict[key]
if np.isnan(o_new).any():
self.logger.warn('NaN caught during rollout generation. Trying again...')
self.reset_all_rollouts()
return self.generate_rollouts()
dones.append(done)
obs.append(o.copy())
achieved_goals.append(ag.copy())
successes.append(success.copy())
acts.append(u.copy())
goals.append(self.g.copy())
o[...] = o_new
ag[...] = ag_new
obs.append(o.copy())
achieved_goals.append(ag.copy())
episode = dict(o=obs,
u=acts,
g=goals,
ag=achieved_goals)
for key, value in zip(self.info_keys, info_values):
episode['info_{}'.format(key)] = value
# stats
successful = np.array(successes)[-1, :]
assert successful.shape == (self.rollout_batch_size,)
success_rate = np.mean(successful)
self.success_history.append(success_rate)
if self.compute_Q:
self.Q_history.append(np.mean(Qs))
self.n_episodes += self.rollout_batch_size
        return convert_episode_to_batch_major(episode) | python | [
")",
"if",
"self",
".",
"compute_Q",
":",
"self",
".",
"Q_history",
".",
"append",
"(",
"np",
".",
"mean",
"(",
"Qs",
")",
")",
"self",
".",
"n_episodes",
"+=",
"self",
".",
"rollout_batch_size",
"return",
"convert_episode_to_batch_major",
"(",
"episode",
")"
] | Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current
policy acting on it accordingly. | [
"Performs",
"rollout_batch_size",
"rollouts",
"in",
"parallel",
"for",
"time",
"horizon",
"T",
"with",
"the",
"current",
"policy",
"acting",
"on",
"it",
"accordingly",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/rollout.py#L51-L137 | valid |
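
A minimal sketch of the bookkeeping that `generate_rollouts` performs, with toy arrays standing in for the policy and the vectorized environment. All sizes are illustrative, and the final transpose only mirrors what `convert_episode_to_batch_major` is expected to do (time-major lists to batch-major arrays):

```python
import numpy as np

T, batch, dim_o, dim_u = 5, 2, 3, 1           # illustrative sizes
o = np.zeros((batch, dim_o), np.float32)      # current observations
obs, acts = [], []
for t in range(T):
    u = np.random.uniform(-1., 1., (batch, dim_u))  # stand-in policy output
    obs.append(o.copy())                            # store obs *before* stepping
    acts.append(u.copy())
    o = o + 0.1 * u                                 # stand-in env transition
obs.append(o.copy())                          # T + 1 observations, T actions

# time-major lists -> batch-major arrays, as the real helper is expected to do
episode = {'o': np.asarray(obs).swapaxes(0, 1),
           'u': np.asarray(acts).swapaxes(0, 1)}
assert episode['o'].shape == (batch, T + 1, dim_o)
assert episode['u'].shape == (batch, T, dim_u)
```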
openai/baselines | baselines/her/rollout.py | RolloutWorker.save_policy | def save_policy(self, path):
"""Pickles the current policy for later inspection.
"""
with open(path, 'wb') as f:
pickle.dump(self.policy, f) | python | def save_policy(self, path):
"""Pickles the current policy for later inspection.
"""
with open(path, 'wb') as f:
pickle.dump(self.policy, f) | [
"def",
"save_policy",
"(",
"self",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"self",
".",
"policy",
",",
"f",
")"
] | Pickles the current policy for later inspection. | [
"Pickles",
"the",
"current",
"policy",
"for",
"later",
"inspection",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/rollout.py#L151-L155 | valid |
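
`save_policy` is a thin wrapper around `pickle`. A round-trip sketch with a stand-in class in place of a trained policy (`DummyPolicy` and the file name are purely illustrative):

```python
import os
import pickle
import tempfile

class DummyPolicy:                       # stand-in for a trained policy object
    def __init__(self, noise_eps):
        self.noise_eps = noise_eps

policy = DummyPolicy(noise_eps=0.2)
path = os.path.join(tempfile.mkdtemp(), 'policy_latest.pkl')

with open(path, 'wb') as f:              # same pattern as save_policy
    pickle.dump(policy, f)

with open(path, 'rb') as f:              # later inspection
    restored = pickle.load(f)
assert restored.noise_eps == 0.2
```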
openai/baselines | baselines/her/rollout.py | RolloutWorker.logs | def logs(self, prefix='worker'):
"""Generates a dictionary that contains all collected statistics.
"""
logs = []
logs += [('success_rate', np.mean(self.success_history))]
if self.compute_Q:
logs += [('mean_Q', np.mean(self.Q_history))]
logs += [('episode', self.n_episodes)]
if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs | python | def logs(self, prefix='worker'):
"""Generates a dictionary that contains all collected statistics.
"""
logs = []
logs += [('success_rate', np.mean(self.success_history))]
if self.compute_Q:
logs += [('mean_Q', np.mean(self.Q_history))]
logs += [('episode', self.n_episodes)]
if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs | [
"def",
"logs",
"(",
"self",
",",
"prefix",
"=",
"'worker'",
")",
":",
"logs",
"=",
"[",
"]",
"logs",
"+=",
"[",
"(",
"'success_rate'",
",",
"np",
".",
"mean",
"(",
"self",
".",
"success_history",
")",
")",
"]",
"if",
"self",
".",
"compute_Q",
":",
"logs",
"+=",
"[",
"(",
"'mean_Q'",
",",
"np",
".",
"mean",
"(",
"self",
".",
"Q_history",
")",
")",
"]",
"logs",
"+=",
"[",
"(",
"'episode'",
",",
"self",
".",
"n_episodes",
")",
"]",
"if",
"prefix",
"!=",
"''",
"and",
"not",
"prefix",
".",
"endswith",
"(",
"'/'",
")",
":",
"return",
"[",
"(",
"prefix",
"+",
"'/'",
"+",
"key",
",",
"val",
")",
"for",
"key",
",",
"val",
"in",
"logs",
"]",
"else",
":",
"return",
"logs"
] | Generates a list of (key, value) pairs with all collected statistics. | [
"Generates",
"a",
"dictionary",
"that",
"contains",
"all",
"collected",
"statistics",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/rollout.py#L157-L169 | valid |
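
The prefixing logic in `logs` is easy to check in isolation. A self-contained re-creation with made-up statistics (the helper name `format_logs` is illustrative, not part of baselines):

```python
import numpy as np

def format_logs(success_history, q_history, n_episodes, prefix='worker'):
    """Mirror of RolloutWorker.logs: build (key, value) pairs, then prefix them."""
    logs = [('success_rate', np.mean(success_history))]
    if q_history:
        logs += [('mean_Q', np.mean(q_history))]
    logs += [('episode', n_episodes)]
    if prefix != '' and not prefix.endswith('/'):
        return [(prefix + '/' + key, val) for key, val in logs]
    return logs

print(format_logs([1.0, 0.0, 1.0], [0.5, 0.7], n_episodes=3))
# -> [('worker/success_rate', ~0.667), ('worker/mean_Q', ~0.6), ('worker/episode', 3)]
```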
openai/baselines | baselines/common/plot_util.py | smooth | def smooth(y, radius, mode='two_sided', valid_only=False):
'''
Smooth signal y, where radius is determines the size of the window
mode='twosided':
average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]
mode='causal':
average over the window [max(index - radius, 0), index]
valid_only: put nan in entries where the full-sized window is not available
'''
assert mode in ('two_sided', 'causal')
if len(y) < 2*radius+1:
return np.ones_like(y) * y.mean()
elif mode == 'two_sided':
convkernel = np.ones(2 * radius+1)
out = np.convolve(y, convkernel,mode='same') / np.convolve(np.ones_like(y), convkernel, mode='same')
if valid_only:
out[:radius] = out[-radius:] = np.nan
elif mode == 'causal':
convkernel = np.ones(radius)
out = np.convolve(y, convkernel,mode='full') / np.convolve(np.ones_like(y), convkernel, mode='full')
        out = out[:len(y)]  # trim the 'full' convolution back to len(y) (the original out[:-radius+1] broke for radius == 1)
if valid_only:
out[:radius] = np.nan
return out | python | def smooth(y, radius, mode='two_sided', valid_only=False):
'''
    Smooth signal y, where radius determines the size of the averaging window
    mode='two_sided':
average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]
mode='causal':
average over the window [max(index - radius, 0), index]
valid_only: put nan in entries where the full-sized window is not available
'''
assert mode in ('two_sided', 'causal')
if len(y) < 2*radius+1:
return np.ones_like(y) * y.mean()
elif mode == 'two_sided':
convkernel = np.ones(2 * radius+1)
out = np.convolve(y, convkernel,mode='same') / np.convolve(np.ones_like(y), convkernel, mode='same')
if valid_only:
out[:radius] = out[-radius:] = np.nan
elif mode == 'causal':
convkernel = np.ones(radius)
out = np.convolve(y, convkernel,mode='full') / np.convolve(np.ones_like(y), convkernel, mode='full')
        out = out[:len(y)]  # trim the 'full' convolution back to len(y) (the original out[:-radius+1] broke for radius == 1)
if valid_only:
out[:radius] = np.nan
return out | [
"def",
"smooth",
"(",
"y",
",",
"radius",
",",
"mode",
"=",
"'two_sided'",
",",
"valid_only",
"=",
"False",
")",
":",
"assert",
"mode",
"in",
"(",
"'two_sided'",
",",
"'causal'",
")",
"if",
"len",
"(",
"y",
")",
"<",
"2",
"*",
"radius",
"+",
"1",
":",
"return",
"np",
".",
"ones_like",
"(",
"y",
")",
"*",
"y",
".",
"mean",
"(",
")",
"elif",
"mode",
"==",
"'two_sided'",
":",
"convkernel",
"=",
"np",
".",
"ones",
"(",
"2",
"*",
"radius",
"+",
"1",
")",
"out",
"=",
"np",
".",
"convolve",
"(",
"y",
",",
"convkernel",
",",
"mode",
"=",
"'same'",
")",
"/",
"np",
".",
"convolve",
"(",
"np",
".",
"ones_like",
"(",
"y",
")",
",",
"convkernel",
",",
"mode",
"=",
"'same'",
")",
"if",
"valid_only",
":",
"out",
"[",
":",
"radius",
"]",
"=",
"out",
"[",
"-",
"radius",
":",
"]",
"=",
"np",
".",
"nan",
"elif",
"mode",
"==",
"'causal'",
":",
"convkernel",
"=",
"np",
".",
"ones",
"(",
"radius",
")",
"out",
"=",
"np",
".",
"convolve",
"(",
"y",
",",
"convkernel",
",",
"mode",
"=",
"'full'",
")",
"/",
"np",
".",
"convolve",
"(",
"np",
".",
"ones_like",
"(",
"y",
")",
",",
"convkernel",
",",
"mode",
"=",
"'full'",
")",
"out",
"=",
"out",
"[",
":",
"-",
"radius",
"+",
"1",
"]",
"if",
"valid_only",
":",
"out",
"[",
":",
"radius",
"]",
"=",
"np",
".",
"nan",
"return",
"out"
] | Smooth signal y, where radius determines the size of the averaging window
    mode='two_sided':
average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]
mode='causal':
average over the window [max(index - radius, 0), index]
valid_only: put nan in entries where the full-sized window is not available | [
"Smooth",
"signal",
"y",
"where",
"radius",
"is",
"determines",
"the",
"size",
"of",
"the",
"window"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L11-L37 | valid |
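
A usage sketch for the `smooth` function documented in the row above, assuming the `baselines` package from this repo is installed: `radius` sets the window size and `mode` chooses centered versus causal averaging.

```python
import numpy as np
from baselines.common.plot_util import smooth  # assumes baselines is installed

rng = np.random.RandomState(0)
y = np.sin(np.linspace(0, 4 * np.pi, 200)) + 0.3 * rng.randn(200)

y_centered = smooth(y, radius=10)                # two-sided (default) window
y_causal = smooth(y, radius=10, mode='causal')   # uses only past values
y_valid = smooth(y, radius=10, valid_only=True)  # NaN where the window is partial
assert np.isnan(y_valid[:10]).all() and np.isnan(y_valid[-10:]).all()
```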
openai/baselines | baselines/common/plot_util.py | one_sided_ema | def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
        yolds: array or list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple (xs, ys, count_ys) where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
low = xolds[0] if low is None else low
high = xolds[-1] if high is None else high
assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(low, xolds[0])
assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(high, xolds[-1])
assert len(xolds) == len(yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds))
xolds = xolds.astype('float64')
yolds = yolds.astype('float64')
luoi = 0 # last unused old index
sum_y = 0.
count_y = 0.
xnews = np.linspace(low, high, n)
decay_period = (high - low) / (n - 1) * decay_steps
interstep_decay = np.exp(- 1. / decay_steps)
sum_ys = np.zeros_like(xnews)
count_ys = np.zeros_like(xnews)
for i in range(n):
xnew = xnews[i]
sum_y *= interstep_decay
count_y *= interstep_decay
while True:
xold = xolds[luoi]
if xold <= xnew:
decay = np.exp(- (xnew - xold) / decay_period)
sum_y += decay * yolds[luoi]
count_y += decay
luoi += 1
else:
break
if luoi >= len(xolds):
break
sum_ys[i] = sum_y
count_ys[i] = count_y
ys = sum_ys / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xnews, ys, count_ys | python | def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
        yolds: array or list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple (xs, ys, count_ys) where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
low = xolds[0] if low is None else low
high = xolds[-1] if high is None else high
assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(low, xolds[0])
assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(high, xolds[-1])
assert len(xolds) == len(yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds))
xolds = xolds.astype('float64')
yolds = yolds.astype('float64')
luoi = 0 # last unused old index
sum_y = 0.
count_y = 0.
xnews = np.linspace(low, high, n)
decay_period = (high - low) / (n - 1) * decay_steps
interstep_decay = np.exp(- 1. / decay_steps)
sum_ys = np.zeros_like(xnews)
count_ys = np.zeros_like(xnews)
for i in range(n):
xnew = xnews[i]
sum_y *= interstep_decay
count_y *= interstep_decay
while True:
xold = xolds[luoi]
if xold <= xnew:
decay = np.exp(- (xnew - xold) / decay_period)
sum_y += decay * yolds[luoi]
count_y += decay
luoi += 1
else:
break
if luoi >= len(xolds):
break
sum_ys[i] = sum_y
count_ys[i] = count_y
ys = sum_ys / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xnews, ys, count_ys | [
"def",
"one_sided_ema",
"(",
"xolds",
",",
"yolds",
",",
"low",
"=",
"None",
",",
"high",
"=",
"None",
",",
"n",
"=",
"512",
",",
"decay_steps",
"=",
"1.",
",",
"low_counts_threshold",
"=",
"1e-8",
")",
":",
"low",
"=",
"xolds",
"[",
"0",
"]",
"if",
"low",
"is",
"None",
"else",
"low",
"high",
"=",
"xolds",
"[",
"-",
"1",
"]",
"if",
"high",
"is",
"None",
"else",
"high",
"assert",
"xolds",
"[",
"0",
"]",
"<=",
"low",
",",
"'low = {} < xolds[0] = {} - extrapolation not permitted!'",
".",
"format",
"(",
"low",
",",
"xolds",
"[",
"0",
"]",
")",
"assert",
"xolds",
"[",
"-",
"1",
"]",
">=",
"high",
",",
"'high = {} > xolds[-1] = {} - extrapolation not permitted!'",
".",
"format",
"(",
"high",
",",
"xolds",
"[",
"-",
"1",
"]",
")",
"assert",
"len",
"(",
"xolds",
")",
"==",
"len",
"(",
"yolds",
")",
",",
"'length of xolds ({}) and yolds ({}) do not match!'",
".",
"format",
"(",
"len",
"(",
"xolds",
")",
",",
"len",
"(",
"yolds",
")",
")",
"xolds",
"=",
"xolds",
".",
"astype",
"(",
"'float64'",
")",
"yolds",
"=",
"yolds",
".",
"astype",
"(",
"'float64'",
")",
"luoi",
"=",
"0",
"# last unused old index",
"sum_y",
"=",
"0.",
"count_y",
"=",
"0.",
"xnews",
"=",
"np",
".",
"linspace",
"(",
"low",
",",
"high",
",",
"n",
")",
"decay_period",
"=",
"(",
"high",
"-",
"low",
")",
"/",
"(",
"n",
"-",
"1",
")",
"*",
"decay_steps",
"interstep_decay",
"=",
"np",
".",
"exp",
"(",
"-",
"1.",
"/",
"decay_steps",
")",
"sum_ys",
"=",
"np",
".",
"zeros_like",
"(",
"xnews",
")",
"count_ys",
"=",
"np",
".",
"zeros_like",
"(",
"xnews",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"xnew",
"=",
"xnews",
"[",
"i",
"]",
"sum_y",
"*=",
"interstep_decay",
"count_y",
"*=",
"interstep_decay",
"while",
"True",
":",
"xold",
"=",
"xolds",
"[",
"luoi",
"]",
"if",
"xold",
"<=",
"xnew",
":",
"decay",
"=",
"np",
".",
"exp",
"(",
"-",
"(",
"xnew",
"-",
"xold",
")",
"/",
"decay_period",
")",
"sum_y",
"+=",
"decay",
"*",
"yolds",
"[",
"luoi",
"]",
"count_y",
"+=",
"decay",
"luoi",
"+=",
"1",
"else",
":",
"break",
"if",
"luoi",
">=",
"len",
"(",
"xolds",
")",
":",
"break",
"sum_ys",
"[",
"i",
"]",
"=",
"sum_y",
"count_ys",
"[",
"i",
"]",
"=",
"count_y",
"ys",
"=",
"sum_ys",
"/",
"count_ys",
"ys",
"[",
"count_ys",
"<",
"low_counts_threshold",
"]",
"=",
"np",
".",
"nan",
"return",
"xnews",
",",
"ys",
",",
"count_ys"
] | perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
        yolds: array or list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple (xs, ys, count_ys) where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid | [
"perform",
"one",
"-",
"sided",
"(",
"causal",
")",
"EMA",
"(",
"exponential",
"moving",
"average",
")",
"smoothing",
"and",
"resampling",
"to",
"an",
"even",
"grid",
"with",
"n",
"points",
".",
"Does",
"not",
"do",
"extrapolation",
"so",
"we",
"assume",
"xolds",
"[",
"0",
"]",
"<",
"=",
"low",
"&&",
"high",
"<",
"=",
"xolds",
"[",
"-",
"1",
"]"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L39-L109 | valid |
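
A usage sketch for `one_sided_ema` (again assuming `baselines` is installed): it takes an unevenly sampled curve and returns a causal EMA resampled onto a uniform grid of `n` points.

```python
import numpy as np
from baselines.common.plot_util import one_sided_ema  # assumes baselines is installed

rng = np.random.RandomState(0)
xs = np.cumsum(rng.uniform(0.5, 1.5, size=300))   # irregular, ascending x values
ys = np.log1p(xs) + 0.2 * rng.randn(300)          # noisy training-curve stand-in

grid_x, ema_y, counts = one_sided_ema(xs, ys, n=128, decay_steps=5.0)
assert grid_x.shape == ema_y.shape == counts.shape == (128,)
# larger decay_steps -> longer EMA memory on the new grid, i.e. heavier smoothing
```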
openai/baselines | baselines/common/plot_util.py | symmetric_ema | def symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform symmetric EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
        yolds: array or list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple (xs, ys, count_ys) where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
xs, ys1, count_ys1 = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold=0)
_, ys2, count_ys2 = one_sided_ema(-xolds[::-1], yolds[::-1], -high, -low, n, decay_steps, low_counts_threshold=0)
ys2 = ys2[::-1]
count_ys2 = count_ys2[::-1]
count_ys = count_ys1 + count_ys2
ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xs, ys, count_ys | python | def symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform symmetric EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
        yolds: array or list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple (xs, ys, count_ys) where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
xs, ys1, count_ys1 = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold=0)
_, ys2, count_ys2 = one_sided_ema(-xolds[::-1], yolds[::-1], -high, -low, n, decay_steps, low_counts_threshold=0)
ys2 = ys2[::-1]
count_ys2 = count_ys2[::-1]
count_ys = count_ys1 + count_ys2
ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xs, ys, count_ys | [
"def",
"symmetric_ema",
"(",
"xolds",
",",
"yolds",
",",
"low",
"=",
"None",
",",
"high",
"=",
"None",
",",
"n",
"=",
"512",
",",
"decay_steps",
"=",
"1.",
",",
"low_counts_threshold",
"=",
"1e-8",
")",
":",
"xs",
",",
"ys1",
",",
"count_ys1",
"=",
"one_sided_ema",
"(",
"xolds",
",",
"yolds",
",",
"low",
",",
"high",
",",
"n",
",",
"decay_steps",
",",
"low_counts_threshold",
"=",
"0",
")",
"_",
",",
"ys2",
",",
"count_ys2",
"=",
"one_sided_ema",
"(",
"-",
"xolds",
"[",
":",
":",
"-",
"1",
"]",
",",
"yolds",
"[",
":",
":",
"-",
"1",
"]",
",",
"-",
"high",
",",
"-",
"low",
",",
"n",
",",
"decay_steps",
",",
"low_counts_threshold",
"=",
"0",
")",
"ys2",
"=",
"ys2",
"[",
":",
":",
"-",
"1",
"]",
"count_ys2",
"=",
"count_ys2",
"[",
":",
":",
"-",
"1",
"]",
"count_ys",
"=",
"count_ys1",
"+",
"count_ys2",
"ys",
"=",
"(",
"ys1",
"*",
"count_ys1",
"+",
"ys2",
"*",
"count_ys2",
")",
"/",
"count_ys",
"ys",
"[",
"count_ys",
"<",
"low_counts_threshold",
"]",
"=",
"np",
".",
"nan",
"return",
"xs",
",",
"ys",
",",
"count_ys"
] | perform symmetric EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
        yolds: array or list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple (xs, ys, count_ys) where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid | [
"perform",
"symmetric",
"EMA",
"(",
"exponential",
"moving",
"average",
")",
"smoothing",
"and",
"resampling",
"to",
"an",
"even",
"grid",
"with",
"n",
"points",
".",
"Does",
"not",
"do",
"extrapolation",
"so",
"we",
"assume",
"xolds",
"[",
"0",
"]",
"<",
"=",
"low",
"&&",
"high",
"<",
"=",
"xolds",
"[",
"-",
"1",
"]"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L111-L147 | valid |
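
`symmetric_ema` removes the phase lag of the causal version by averaging a forward pass with a pass over the time-reversed data; it is the resampler `plot_results` relies on. A sketch under the same installed-package assumption:

```python
import numpy as np
from baselines.common.plot_util import symmetric_ema  # assumes baselines is installed

rng = np.random.RandomState(0)
xs = np.linspace(0.0, 100.0, 400)
ys = np.sqrt(xs) + rng.randn(400)

grid_x, sym_y, counts = symmetric_ema(xs, ys, low=xs[0], high=xs[-1],
                                      n=256, decay_steps=3.0)
assert not np.isnan(sym_y).any()   # counts are positive everywhere on the grid
```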
openai/baselines | baselines/common/plot_util.py | load_results | def load_results(root_dir_or_dirs, enable_progress=True, enable_monitor=True, verbose=False):
'''
load summaries of runs from a list of directories (including subdirectories)
Arguments:
enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True
enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True
verbose: bool - if True, will print out list of directories from which the data is loaded. Default: False
Returns:
List of Result objects with the following fields:
- dirname - path to the directory data was loaded from
        - metadata - run metadata (such as command-line arguments and anything else in the metadata.json file)
- monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory)
- progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file
'''
import re
if isinstance(root_dir_or_dirs, str):
rootdirs = [osp.expanduser(root_dir_or_dirs)]
else:
rootdirs = [osp.expanduser(d) for d in root_dir_or_dirs]
allresults = []
for rootdir in rootdirs:
assert osp.exists(rootdir), "%s doesn't exist"%rootdir
for dirname, dirs, files in os.walk(rootdir):
if '-proc' in dirname:
files[:] = []
continue
monitor_re = re.compile(r'(\d+\.)?(\d+\.)?monitor\.csv')
if set(['metadata.json', 'monitor.json', 'progress.json', 'progress.csv']).intersection(files) or \
any([f for f in files if monitor_re.match(f)]): # also match monitor files like 0.1.monitor.csv
# used to be uncommented, which means do not go deeper than current directory if any of the data files
# are found
# dirs[:] = []
result = {'dirname' : dirname}
if "metadata.json" in files:
with open(osp.join(dirname, "metadata.json"), "r") as fh:
result['metadata'] = json.load(fh)
progjson = osp.join(dirname, "progress.json")
progcsv = osp.join(dirname, "progress.csv")
if enable_progress:
if osp.exists(progjson):
result['progress'] = pandas.DataFrame(read_json(progjson))
elif osp.exists(progcsv):
try:
result['progress'] = read_csv(progcsv)
except pandas.errors.EmptyDataError:
print('skipping progress file in ', dirname, 'empty data')
else:
if verbose: print('skipping %s: no progress file'%dirname)
if enable_monitor:
try:
result['monitor'] = pandas.DataFrame(monitor.load_results(dirname))
except monitor.LoadMonitorResultsError:
print('skipping %s: no monitor files'%dirname)
except Exception as e:
print('exception loading monitor file in %s: %s'%(dirname, e))
if result.get('monitor') is not None or result.get('progress') is not None:
allresults.append(Result(**result))
if verbose:
print('successfully loaded %s'%dirname)
if verbose: print('loaded %i results'%len(allresults))
return allresults | python | def load_results(root_dir_or_dirs, enable_progress=True, enable_monitor=True, verbose=False):
'''
load summaries of runs from a list of directories (including subdirectories)
Arguments:
enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True
enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True
verbose: bool - if True, will print out list of directories from which the data is loaded. Default: False
Returns:
List of Result objects with the following fields:
- dirname - path to the directory data was loaded from
        - metadata - run metadata (such as command-line arguments and anything else in the metadata.json file)
- monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory)
- progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file
'''
import re
if isinstance(root_dir_or_dirs, str):
rootdirs = [osp.expanduser(root_dir_or_dirs)]
else:
rootdirs = [osp.expanduser(d) for d in root_dir_or_dirs]
allresults = []
for rootdir in rootdirs:
assert osp.exists(rootdir), "%s doesn't exist"%rootdir
for dirname, dirs, files in os.walk(rootdir):
if '-proc' in dirname:
files[:] = []
continue
monitor_re = re.compile(r'(\d+\.)?(\d+\.)?monitor\.csv')
if set(['metadata.json', 'monitor.json', 'progress.json', 'progress.csv']).intersection(files) or \
any([f for f in files if monitor_re.match(f)]): # also match monitor files like 0.1.monitor.csv
# used to be uncommented, which means do not go deeper than current directory if any of the data files
# are found
# dirs[:] = []
result = {'dirname' : dirname}
if "metadata.json" in files:
with open(osp.join(dirname, "metadata.json"), "r") as fh:
result['metadata'] = json.load(fh)
progjson = osp.join(dirname, "progress.json")
progcsv = osp.join(dirname, "progress.csv")
if enable_progress:
if osp.exists(progjson):
result['progress'] = pandas.DataFrame(read_json(progjson))
elif osp.exists(progcsv):
try:
result['progress'] = read_csv(progcsv)
except pandas.errors.EmptyDataError:
print('skipping progress file in ', dirname, 'empty data')
else:
if verbose: print('skipping %s: no progress file'%dirname)
if enable_monitor:
try:
result['monitor'] = pandas.DataFrame(monitor.load_results(dirname))
except monitor.LoadMonitorResultsError:
print('skipping %s: no monitor files'%dirname)
except Exception as e:
print('exception loading monitor file in %s: %s'%(dirname, e))
if result.get('monitor') is not None or result.get('progress') is not None:
allresults.append(Result(**result))
if verbose:
print('successfully loaded %s'%dirname)
if verbose: print('loaded %i results'%len(allresults))
return allresults | [
"def",
"load_results",
"(",
"root_dir_or_dirs",
",",
"enable_progress",
"=",
"True",
",",
"enable_monitor",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
":",
"import",
"re",
"if",
"isinstance",
"(",
"root_dir_or_dirs",
",",
"str",
")",
":",
"rootdirs",
"=",
"[",
"osp",
".",
"expanduser",
"(",
"root_dir_or_dirs",
")",
"]",
"else",
":",
"rootdirs",
"=",
"[",
"osp",
".",
"expanduser",
"(",
"d",
")",
"for",
"d",
"in",
"root_dir_or_dirs",
"]",
"allresults",
"=",
"[",
"]",
"for",
"rootdir",
"in",
"rootdirs",
":",
"assert",
"osp",
".",
"exists",
"(",
"rootdir",
")",
",",
"\"%s doesn't exist\"",
"%",
"rootdir",
"for",
"dirname",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"rootdir",
")",
":",
"if",
"'-proc'",
"in",
"dirname",
":",
"files",
"[",
":",
"]",
"=",
"[",
"]",
"continue",
"monitor_re",
"=",
"re",
".",
"compile",
"(",
"r'(\\d+\\.)?(\\d+\\.)?monitor\\.csv'",
")",
"if",
"set",
"(",
"[",
"'metadata.json'",
",",
"'monitor.json'",
",",
"'progress.json'",
",",
"'progress.csv'",
"]",
")",
".",
"intersection",
"(",
"files",
")",
"or",
"any",
"(",
"[",
"f",
"for",
"f",
"in",
"files",
"if",
"monitor_re",
".",
"match",
"(",
"f",
")",
"]",
")",
":",
"# also match monitor files like 0.1.monitor.csv",
"# used to be uncommented, which means do not go deeper than current directory if any of the data files",
"# are found",
"# dirs[:] = []",
"result",
"=",
"{",
"'dirname'",
":",
"dirname",
"}",
"if",
"\"metadata.json\"",
"in",
"files",
":",
"with",
"open",
"(",
"osp",
".",
"join",
"(",
"dirname",
",",
"\"metadata.json\"",
")",
",",
"\"r\"",
")",
"as",
"fh",
":",
"result",
"[",
"'metadata'",
"]",
"=",
"json",
".",
"load",
"(",
"fh",
")",
"progjson",
"=",
"osp",
".",
"join",
"(",
"dirname",
",",
"\"progress.json\"",
")",
"progcsv",
"=",
"osp",
".",
"join",
"(",
"dirname",
",",
"\"progress.csv\"",
")",
"if",
"enable_progress",
":",
"if",
"osp",
".",
"exists",
"(",
"progjson",
")",
":",
"result",
"[",
"'progress'",
"]",
"=",
"pandas",
".",
"DataFrame",
"(",
"read_json",
"(",
"progjson",
")",
")",
"elif",
"osp",
".",
"exists",
"(",
"progcsv",
")",
":",
"try",
":",
"result",
"[",
"'progress'",
"]",
"=",
"read_csv",
"(",
"progcsv",
")",
"except",
"pandas",
".",
"errors",
".",
"EmptyDataError",
":",
"print",
"(",
"'skipping progress file in '",
",",
"dirname",
",",
"'empty data'",
")",
"else",
":",
"if",
"verbose",
":",
"print",
"(",
"'skipping %s: no progress file'",
"%",
"dirname",
")",
"if",
"enable_monitor",
":",
"try",
":",
"result",
"[",
"'monitor'",
"]",
"=",
"pandas",
".",
"DataFrame",
"(",
"monitor",
".",
"load_results",
"(",
"dirname",
")",
")",
"except",
"monitor",
".",
"LoadMonitorResultsError",
":",
"print",
"(",
"'skipping %s: no monitor files'",
"%",
"dirname",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'exception loading monitor file in %s: %s'",
"%",
"(",
"dirname",
",",
"e",
")",
")",
"if",
"result",
".",
"get",
"(",
"'monitor'",
")",
"is",
"not",
"None",
"or",
"result",
".",
"get",
"(",
"'progress'",
")",
"is",
"not",
"None",
":",
"allresults",
".",
"append",
"(",
"Result",
"(",
"*",
"*",
"result",
")",
")",
"if",
"verbose",
":",
"print",
"(",
"'successfully loaded %s'",
"%",
"dirname",
")",
"if",
"verbose",
":",
"print",
"(",
"'loaded %i results'",
"%",
"len",
"(",
"allresults",
")",
")",
"return",
"allresults"
] | load summaries of runs from a list of directories (including subdirectories)
Arguments:
enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True
enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True
verbose: bool - if True, will print out list of directories from which the data is loaded. Default: False
Returns:
List of Result objects with the following fields:
- dirname - path to the directory data was loaded from
        - metadata - run metadata (such as command-line arguments and anything else in the metadata.json file)
- monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory)
- progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file | [
"load",
"summaries",
"of",
"runs",
"from",
"a",
"list",
"of",
"directories",
"(",
"including",
"subdirectories",
")",
"Arguments",
":"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L152-L220 | valid |
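
Typical use of `load_results`, hedged: `~/logs/experiment` is an illustrative path, and the sketch assumes `baselines` is installed and the directory actually contains monitor or progress files from a run.

```python
from baselines.common.plot_util import load_results  # assumes baselines is installed

# '~/logs/experiment' is an illustrative path; point it at a real log directory
results = load_results('~/logs/experiment', verbose=True)
for r in results:
    print(r.dirname)                            # where this run's data came from
    if r.progress is not None:
        print(r.progress.columns.tolist())      # keys written by the logger
```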
openai/baselines | baselines/common/plot_util.py | plot_results | def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=default_split_fn,
group_fn=default_split_fn,
average_group=False,
shaded_std=True,
shaded_err=True,
figsize=None,
legend_outside=False,
resample=0,
smooth_step=1.0
):
'''
Plot multiple Results objects
    xy_fn: function Result -> x,y - function that converts results objects into a tuple of x and y values.
By default, x is cumsum of episode lengths, and y is episode rewards
split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.
That is, the results r for which split_fn(r) is different will be put on different sub-panels.
By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are
stacked vertically in the figure.
group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.
That is, the results r for which group_fn(r) is the same will be put into the same group.
                                            Curves in the same group have the same color (if average_group is False), or are averaged over
                                            (if average_group is True). The default value is the same as the default value for split_fn
average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling
(if resample = 0, will use 512 steps)
shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be
shown (only applicable if average_group = True)
shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves
(that is, standard deviation divided by square root of number of curves) will be
shown (only applicable if average_group = True)
figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of
sub-panels.
legend_outside: bool - if True, will place the legend outside of the sub-panels.
resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric
EMA smoothing (see the docstring for symmetric_ema).
Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default
value is 512.
smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).
See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.
'''
if split_fn is None: split_fn = lambda _ : ''
if group_fn is None: group_fn = lambda _ : ''
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
assert len(sk2r) > 0
assert isinstance(resample, int), "0: don't resample. <integer>: that many samples"
nrows = len(sk2r)
ncols = 1
figsize = figsize or (6, 6 * nrows)
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
for (isplit, sk) in enumerate(sorted(sk2r.keys())):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
ax = axarr[isplit][0]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for group in sorted(groups):
xys = gresults[group]
if not any(xys):
continue
color = COLORS[groups.index(group) % len(COLORS)]
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]),\
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
l, = axarr[isplit][0].plot(usex, ymean, color=color)
g2l[group] = l
if shaded_err:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
# https://matplotlib.org/users/legend_guide.html
plt.tight_layout()
if any(g2l.keys()):
ax.legend(
g2l.values(),
['%s (%i)'%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(),
loc=2 if legend_outside else None,
bbox_to_anchor=(1,1) if legend_outside else None)
ax.set_title(sk)
return f, axarr | python | def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=default_split_fn,
group_fn=default_split_fn,
average_group=False,
shaded_std=True,
shaded_err=True,
figsize=None,
legend_outside=False,
resample=0,
smooth_step=1.0
):
'''
Plot multiple Results objects
    xy_fn: function Result -> x,y - function that converts results objects into a tuple of x and y values.
By default, x is cumsum of episode lengths, and y is episode rewards
split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.
That is, the results r for which split_fn(r) is different will be put on different sub-panels.
By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are
stacked vertically in the figure.
group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.
That is, the results r for which group_fn(r) is the same will be put into the same group.
                                            Curves in the same group have the same color (if average_group is False), or are averaged over
                                            (if average_group is True). The default value is the same as the default value for split_fn
average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling
(if resample = 0, will use 512 steps)
shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be
shown (only applicable if average_group = True)
shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves
(that is, standard deviation divided by square root of number of curves) will be
shown (only applicable if average_group = True)
figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of
sub-panels.
legend_outside: bool - if True, will place the legend outside of the sub-panels.
resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric
EMA smoothing (see the docstring for symmetric_ema).
Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default
value is 512.
smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).
See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.
'''
if split_fn is None: split_fn = lambda _ : ''
if group_fn is None: group_fn = lambda _ : ''
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
assert len(sk2r) > 0
assert isinstance(resample, int), "0: don't resample. <integer>: that many samples"
nrows = len(sk2r)
ncols = 1
figsize = figsize or (6, 6 * nrows)
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
for (isplit, sk) in enumerate(sorted(sk2r.keys())):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
ax = axarr[isplit][0]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for group in sorted(groups):
xys = gresults[group]
if not any(xys):
continue
color = COLORS[groups.index(group) % len(COLORS)]
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]),\
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
l, = axarr[isplit][0].plot(usex, ymean, color=color)
g2l[group] = l
if shaded_err:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
# https://matplotlib.org/users/legend_guide.html
plt.tight_layout()
if any(g2l.keys()):
ax.legend(
g2l.values(),
['%s (%i)'%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(),
loc=2 if legend_outside else None,
bbox_to_anchor=(1,1) if legend_outside else None)
ax.set_title(sk)
return f, axarr | [
"def",
"plot_results",
"(",
"allresults",
",",
"*",
",",
"xy_fn",
"=",
"default_xy_fn",
",",
"split_fn",
"=",
"default_split_fn",
",",
"group_fn",
"=",
"default_split_fn",
",",
"average_group",
"=",
"False",
",",
"shaded_std",
"=",
"True",
",",
"shaded_err",
"=",
"True",
",",
"figsize",
"=",
"None",
",",
"legend_outside",
"=",
"False",
",",
"resample",
"=",
"0",
",",
"smooth_step",
"=",
"1.0",
")",
":",
"if",
"split_fn",
"is",
"None",
":",
"split_fn",
"=",
"lambda",
"_",
":",
"''",
"if",
"group_fn",
"is",
"None",
":",
"group_fn",
"=",
"lambda",
"_",
":",
"''",
"sk2r",
"=",
"defaultdict",
"(",
"list",
")",
"# splitkey2results",
"for",
"result",
"in",
"allresults",
":",
"splitkey",
"=",
"split_fn",
"(",
"result",
")",
"sk2r",
"[",
"splitkey",
"]",
".",
"append",
"(",
"result",
")",
"assert",
"len",
"(",
"sk2r",
")",
">",
"0",
"assert",
"isinstance",
"(",
"resample",
",",
"int",
")",
",",
"\"0: don't resample. <integer>: that many samples\"",
"nrows",
"=",
"len",
"(",
"sk2r",
")",
"ncols",
"=",
"1",
"figsize",
"=",
"figsize",
"or",
"(",
"6",
",",
"6",
"*",
"nrows",
")",
"f",
",",
"axarr",
"=",
"plt",
".",
"subplots",
"(",
"nrows",
",",
"ncols",
",",
"sharex",
"=",
"False",
",",
"squeeze",
"=",
"False",
",",
"figsize",
"=",
"figsize",
")",
"groups",
"=",
"list",
"(",
"set",
"(",
"group_fn",
"(",
"result",
")",
"for",
"result",
"in",
"allresults",
")",
")",
"default_samples",
"=",
"512",
"if",
"average_group",
":",
"resample",
"=",
"resample",
"or",
"default_samples",
"for",
"(",
"isplit",
",",
"sk",
")",
"in",
"enumerate",
"(",
"sorted",
"(",
"sk2r",
".",
"keys",
"(",
")",
")",
")",
":",
"g2l",
"=",
"{",
"}",
"g2c",
"=",
"defaultdict",
"(",
"int",
")",
"sresults",
"=",
"sk2r",
"[",
"sk",
"]",
"gresults",
"=",
"defaultdict",
"(",
"list",
")",
"ax",
"=",
"axarr",
"[",
"isplit",
"]",
"[",
"0",
"]",
"for",
"result",
"in",
"sresults",
":",
"group",
"=",
"group_fn",
"(",
"result",
")",
"g2c",
"[",
"group",
"]",
"+=",
"1",
"x",
",",
"y",
"=",
"xy_fn",
"(",
"result",
")",
"if",
"x",
"is",
"None",
":",
"x",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"y",
")",
")",
"x",
",",
"y",
"=",
"map",
"(",
"np",
".",
"asarray",
",",
"(",
"x",
",",
"y",
")",
")",
"if",
"average_group",
":",
"gresults",
"[",
"group",
"]",
".",
"append",
"(",
"(",
"x",
",",
"y",
")",
")",
"else",
":",
"if",
"resample",
":",
"x",
",",
"y",
",",
"counts",
"=",
"symmetric_ema",
"(",
"x",
",",
"y",
",",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"-",
"1",
"]",
",",
"resample",
",",
"decay_steps",
"=",
"smooth_step",
")",
"l",
",",
"=",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"color",
"=",
"COLORS",
"[",
"groups",
".",
"index",
"(",
"group",
")",
"%",
"len",
"(",
"COLORS",
")",
"]",
")",
"g2l",
"[",
"group",
"]",
"=",
"l",
"if",
"average_group",
":",
"for",
"group",
"in",
"sorted",
"(",
"groups",
")",
":",
"xys",
"=",
"gresults",
"[",
"group",
"]",
"if",
"not",
"any",
"(",
"xys",
")",
":",
"continue",
"color",
"=",
"COLORS",
"[",
"groups",
".",
"index",
"(",
"group",
")",
"%",
"len",
"(",
"COLORS",
")",
"]",
"origxs",
"=",
"[",
"xy",
"[",
"0",
"]",
"for",
"xy",
"in",
"xys",
"]",
"minxlen",
"=",
"min",
"(",
"map",
"(",
"len",
",",
"origxs",
")",
")",
"def",
"allequal",
"(",
"qs",
")",
":",
"return",
"all",
"(",
"(",
"q",
"==",
"qs",
"[",
"0",
"]",
")",
".",
"all",
"(",
")",
"for",
"q",
"in",
"qs",
"[",
"1",
":",
"]",
")",
"if",
"resample",
":",
"low",
"=",
"max",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"origxs",
")",
"high",
"=",
"min",
"(",
"x",
"[",
"-",
"1",
"]",
"for",
"x",
"in",
"origxs",
")",
"usex",
"=",
"np",
".",
"linspace",
"(",
"low",
",",
"high",
",",
"resample",
")",
"ys",
"=",
"[",
"]",
"for",
"(",
"x",
",",
"y",
")",
"in",
"xys",
":",
"ys",
".",
"append",
"(",
"symmetric_ema",
"(",
"x",
",",
"y",
",",
"low",
",",
"high",
",",
"resample",
",",
"decay_steps",
"=",
"smooth_step",
")",
"[",
"1",
"]",
")",
"else",
":",
"assert",
"allequal",
"(",
"[",
"x",
"[",
":",
"minxlen",
"]",
"for",
"x",
"in",
"origxs",
"]",
")",
",",
"'If you want to average unevenly sampled data, set resample=<number of samples you want>'",
"usex",
"=",
"origxs",
"[",
"0",
"]",
"ys",
"=",
"[",
"xy",
"[",
"1",
"]",
"[",
":",
"minxlen",
"]",
"for",
"xy",
"in",
"xys",
"]",
"ymean",
"=",
"np",
".",
"mean",
"(",
"ys",
",",
"axis",
"=",
"0",
")",
"ystd",
"=",
"np",
".",
"std",
"(",
"ys",
",",
"axis",
"=",
"0",
")",
"ystderr",
"=",
"ystd",
"/",
"np",
".",
"sqrt",
"(",
"len",
"(",
"ys",
")",
")",
"l",
",",
"=",
"axarr",
"[",
"isplit",
"]",
"[",
"0",
"]",
".",
"plot",
"(",
"usex",
",",
"ymean",
",",
"color",
"=",
"color",
")",
"g2l",
"[",
"group",
"]",
"=",
"l",
"if",
"shaded_err",
":",
"ax",
".",
"fill_between",
"(",
"usex",
",",
"ymean",
"-",
"ystderr",
",",
"ymean",
"+",
"ystderr",
",",
"color",
"=",
"color",
",",
"alpha",
"=",
".4",
")",
"if",
"shaded_std",
":",
"ax",
".",
"fill_between",
"(",
"usex",
",",
"ymean",
"-",
"ystd",
",",
"ymean",
"+",
"ystd",
",",
"color",
"=",
"color",
",",
"alpha",
"=",
".2",
")",
"# https://matplotlib.org/users/legend_guide.html",
"plt",
".",
"tight_layout",
"(",
")",
"if",
"any",
"(",
"g2l",
".",
"keys",
"(",
")",
")",
":",
"ax",
".",
"legend",
"(",
"g2l",
".",
"values",
"(",
")",
",",
"[",
"'%s (%i)'",
"%",
"(",
"g",
",",
"g2c",
"[",
"g",
"]",
")",
"for",
"g",
"in",
"g2l",
"]",
"if",
"average_group",
"else",
"g2l",
".",
"keys",
"(",
")",
",",
"loc",
"=",
"2",
"if",
"legend_outside",
"else",
"None",
",",
"bbox_to_anchor",
"=",
"(",
"1",
",",
"1",
")",
"if",
"legend_outside",
"else",
"None",
")",
"ax",
".",
"set_title",
"(",
"sk",
")",
"return",
"f",
",",
"axarr"
] | Plot multiple Results objects
    xy_fn: function Result -> x,y - function that converts results objects into a tuple of x and y values.
By default, x is cumsum of episode lengths, and y is episode rewards
split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.
That is, the results r for which split_fn(r) is different will be put on different sub-panels.
By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are
stacked vertically in the figure.
group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.
That is, the results r for which group_fn(r) is the same will be put into the same group.
                                            Curves in the same group have the same color (if average_group is False), or are averaged over
                                            (if average_group is True). The default value is the same as the default value for split_fn
average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling
(if resample = 0, will use 512 steps)
shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be
shown (only applicable if average_group = True)
shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves
(that is, standard deviation divided by square root of number of curves) will be
shown (only applicable if average_group = True)
figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of
sub-panels.
legend_outside: bool - if True, will place the legend outside of the sub-panels.
resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric
EMA smoothing (see the docstring for symmetric_ema).
Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default
value is 512.
smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).
See docstrings for decay_steps in symmetric_ema or one_sided_ema functions. | [
"Plot",
"multiple",
"Results",
"objects"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L240-L375 | valid |
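
An end-to-end sketch combining `load_results` and `plot_results` (same assumptions as above; the log path and output filename are illustrative):

```python
import matplotlib
matplotlib.use('Agg')                 # headless backend so the figure can be saved
from baselines.common.plot_util import load_results, plot_results  # assumes baselines is installed

results = load_results('~/logs/experiment')      # illustrative path
fig, axarr = plot_results(results,
                          average_group=True,    # one mean curve per group
                          shaded_std=True,       # +/- one std band
                          resample=512,          # symmetric-EMA grid size
                          smooth_step=2.0)       # EMA decay in grid steps
fig.savefig('learning_curves.png')
```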
openai/baselines | baselines/common/mpi_adam_optimizer.py | check_synced | def check_synced(localval, comm=None):
"""
It's common to forget to initialize your variables to the same values, or
    (less commonly) if you update them in some way other than Adam, to get them out of sync.
This function checks that variables on all MPI workers are the same, and raises
an AssertionError otherwise
Arguments:
comm: MPI communicator
localval: list of local variables (list of variables on current worker to be compared with the other workers)
"""
comm = comm or MPI.COMM_WORLD
vals = comm.gather(localval)
if comm.rank == 0:
assert all(val==vals[0] for val in vals[1:]) | python | def check_synced(localval, comm=None):
"""
It's common to forget to initialize your variables to the same values, or
    (less commonly) if you update them in some way other than Adam, to get them out of sync.
This function checks that variables on all MPI workers are the same, and raises
an AssertionError otherwise
Arguments:
comm: MPI communicator
localval: list of local variables (list of variables on current worker to be compared with the other workers)
"""
comm = comm or MPI.COMM_WORLD
vals = comm.gather(localval)
if comm.rank == 0:
assert all(val==vals[0] for val in vals[1:]) | [
"def",
"check_synced",
"(",
"localval",
",",
"comm",
"=",
"None",
")",
":",
"comm",
"=",
"comm",
"or",
"MPI",
".",
"COMM_WORLD",
"vals",
"=",
"comm",
".",
"gather",
"(",
"localval",
")",
"if",
"comm",
".",
"rank",
"==",
"0",
":",
"assert",
"all",
"(",
"val",
"==",
"vals",
"[",
"0",
"]",
"for",
"val",
"in",
"vals",
"[",
"1",
":",
"]",
")"
] | It's common to forget to initialize your variables to the same values, or
    (less commonly) if you update them in some way other than Adam, to get them out of sync.
This function checks that variables on all MPI workers are the same, and raises
an AssertionError otherwise
Arguments:
comm: MPI communicator
localval: list of local variables (list of variables on current worker to be compared with the other workers) | [
"It",
"s",
"common",
"to",
"forget",
"to",
"initialize",
"your",
"variables",
"to",
"the",
"same",
"values",
"or",
"(",
"less",
"commonly",
")",
"if",
"you",
"update",
"them",
"in",
"some",
"other",
"way",
"than",
"adam",
"to",
"get",
"them",
"out",
"of",
"sync",
".",
"This",
"function",
"checks",
"that",
"variables",
"on",
"all",
"MPI",
"workers",
"are",
"the",
"same",
"and",
"raises",
"an",
"AssertionError",
"otherwise"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/mpi_adam_optimizer.py#L40-L54 | valid |
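
`check_synced` is meant to be called right after parameter initialization in MPI training. A hedged sketch (the script name is illustrative, and it only does something meaningful when launched under `mpiexec`):

```python
# launch with e.g.: mpiexec -n 4 python sync_demo.py   (filename illustrative)
from mpi4py import MPI
from baselines.common.mpi_adam_optimizer import check_synced  # assumes baselines is installed

comm = MPI.COMM_WORLD
params = [1.0, 2.0, 3.0]            # stand-in for flattened network weights
check_synced(params, comm=comm)     # passes: identical on every rank

params[0] += comm.rank              # make the value rank-dependent
# check_synced(params, comm=comm)   # would now raise AssertionError on rank 0
```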
openai/baselines | baselines/common/vec_env/util.py | copy_obs_dict | def copy_obs_dict(obs):
"""
Deep-copy an observation dict.
"""
return {k: np.copy(v) for k, v in obs.items()} | python | def copy_obs_dict(obs):
"""
Deep-copy an observation dict.
"""
return {k: np.copy(v) for k, v in obs.items()} | [
"def",
"copy_obs_dict",
"(",
"obs",
")",
":",
"return",
"{",
"k",
":",
"np",
".",
"copy",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"obs",
".",
"items",
"(",
")",
"}"
] | Deep-copy an observation dict. | [
"Deep",
"-",
"copy",
"an",
"observation",
"dict",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/vec_env/util.py#L11-L15 | valid |
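
`copy_obs_dict` exists because vectorized envs may reuse their observation buffers; a snapshot taken without copying would be silently overwritten. A sketch (assuming `baselines` is installed):

```python
import numpy as np
from baselines.common.vec_env.util import copy_obs_dict  # assumes baselines is installed

obs = {'observation': np.zeros(3), 'achieved_goal': np.ones(2)}
snapshot = copy_obs_dict(obs)

obs['observation'][:] = 99.0                # the env overwrites its buffer in place
assert snapshot['observation'][0] == 0.0    # the copied snapshot is unaffected
```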