repo (stringclasses, 679 values) | path (stringlengths 6-122) | func_name (stringlengths 2-76) | original_string (stringlengths 87-70.9k) | language (stringclasses, 1 value) | code (stringlengths 87-70.9k) | code_tokens (sequencelengths 20-6.91k) | docstring (stringlengths 1-21.7k) | docstring_tokens (sequencelengths 1-1.6k) | sha (stringclasses, 679 values) | url (stringlengths 92-213) | partition (stringclasses, 1 value)
---|---|---|---|---|---|---|---|---|---|---|---|
openai/baselines | baselines/deepq/deepq.py | learn | def learn(env,
network,
seed=None,
lr=5e-4,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
checkpoint_path=None,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
load_path=None,
**network_kwargs
):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
network: string or a function
neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
(mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
seed: int or None
prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
lr: float
learning rate for adam optimizer
total_timesteps: int
number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None, it equals total_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
callback: (locals, globals) -> None
function called at every step with the state of the algorithm.
If the callback returns true, training stops.
load_path: str
path to load the model from. (default: None)
**network_kwargs
additional keyword arguments to pass to the network builder.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = get_session()
set_global_seeds(seed)
q_func = build_q_func(network, **network_kwargs)
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space = env.observation_space
def make_obs_ph(name):
return ObservationInput(observation_space, name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = total_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
td = checkpoint_path or td
model_file = os.path.join(td, "model")
model_saved = False
if tf.train.latest_checkpoint(td) is not None:
load_variables(model_file)
logger.log('Loaded model from {}'.format(model_file))
model_saved = True
elif load_path is not None:
load_variables(load_path)
logger.log('Loaded model from {}'.format(load_path))
for t in range(total_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
save_variables(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
load_variables(model_file)
return act | python | def learn(env,
network,
seed=None,
lr=5e-4,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
checkpoint_path=None,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
load_path=None,
**network_kwargs
):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
network: string or a function
neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
(mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
seed: int or None
prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
lr: float
learning rate for adam optimizer
total_timesteps: int
number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None, it equals total_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
callback: (locals, globals) -> None
function called at every step with the state of the algorithm.
If the callback returns true, training stops.
load_path: str
path to load the model from. (default: None)
**network_kwargs
additional keyword arguments to pass to the network builder.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = get_session()
set_global_seeds(seed)
q_func = build_q_func(network, **network_kwargs)
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space = env.observation_space
def make_obs_ph(name):
return ObservationInput(observation_space, name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = total_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
td = checkpoint_path or td
model_file = os.path.join(td, "model")
model_saved = False
if tf.train.latest_checkpoint(td) is not None:
load_variables(model_file)
logger.log('Loaded model from {}'.format(model_file))
model_saved = True
elif load_path is not None:
load_variables(load_path)
logger.log('Loaded model from {}'.format(load_path))
for t in range(total_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
save_variables(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
load_variables(model_file)
return act | [
"def",
"learn",
"(",
"env",
",",
"network",
",",
"seed",
"=",
"None",
",",
"lr",
"=",
"5e-4",
",",
"total_timesteps",
"=",
"100000",
",",
"buffer_size",
"=",
"50000",
",",
"exploration_fraction",
"=",
"0.1",
",",
"exploration_final_eps",
"=",
"0.02",
",",
"train_freq",
"=",
"1",
",",
"batch_size",
"=",
"32",
",",
"print_freq",
"=",
"100",
",",
"checkpoint_freq",
"=",
"10000",
",",
"checkpoint_path",
"=",
"None",
",",
"learning_starts",
"=",
"1000",
",",
"gamma",
"=",
"1.0",
",",
"target_network_update_freq",
"=",
"500",
",",
"prioritized_replay",
"=",
"False",
",",
"prioritized_replay_alpha",
"=",
"0.6",
",",
"prioritized_replay_beta0",
"=",
"0.4",
",",
"prioritized_replay_beta_iters",
"=",
"None",
",",
"prioritized_replay_eps",
"=",
"1e-6",
",",
"param_noise",
"=",
"False",
",",
"callback",
"=",
"None",
",",
"load_path",
"=",
"None",
",",
"*",
"*",
"network_kwargs",
")",
":",
"# Create all the functions necessary to train the model",
"sess",
"=",
"get_session",
"(",
")",
"set_global_seeds",
"(",
"seed",
")",
"q_func",
"=",
"build_q_func",
"(",
"network",
",",
"*",
"*",
"network_kwargs",
")",
"# capture the shape outside the closure so that the env object is not serialized",
"# by cloudpickle when serializing make_obs_ph",
"observation_space",
"=",
"env",
".",
"observation_space",
"def",
"make_obs_ph",
"(",
"name",
")",
":",
"return",
"ObservationInput",
"(",
"observation_space",
",",
"name",
"=",
"name",
")",
"act",
",",
"train",
",",
"update_target",
",",
"debug",
"=",
"deepq",
".",
"build_train",
"(",
"make_obs_ph",
"=",
"make_obs_ph",
",",
"q_func",
"=",
"q_func",
",",
"num_actions",
"=",
"env",
".",
"action_space",
".",
"n",
",",
"optimizer",
"=",
"tf",
".",
"train",
".",
"AdamOptimizer",
"(",
"learning_rate",
"=",
"lr",
")",
",",
"gamma",
"=",
"gamma",
",",
"grad_norm_clipping",
"=",
"10",
",",
"param_noise",
"=",
"param_noise",
")",
"act_params",
"=",
"{",
"'make_obs_ph'",
":",
"make_obs_ph",
",",
"'q_func'",
":",
"q_func",
",",
"'num_actions'",
":",
"env",
".",
"action_space",
".",
"n",
",",
"}",
"act",
"=",
"ActWrapper",
"(",
"act",
",",
"act_params",
")",
"# Create the replay buffer",
"if",
"prioritized_replay",
":",
"replay_buffer",
"=",
"PrioritizedReplayBuffer",
"(",
"buffer_size",
",",
"alpha",
"=",
"prioritized_replay_alpha",
")",
"if",
"prioritized_replay_beta_iters",
"is",
"None",
":",
"prioritized_replay_beta_iters",
"=",
"total_timesteps",
"beta_schedule",
"=",
"LinearSchedule",
"(",
"prioritized_replay_beta_iters",
",",
"initial_p",
"=",
"prioritized_replay_beta0",
",",
"final_p",
"=",
"1.0",
")",
"else",
":",
"replay_buffer",
"=",
"ReplayBuffer",
"(",
"buffer_size",
")",
"beta_schedule",
"=",
"None",
"# Create the schedule for exploration starting from 1.",
"exploration",
"=",
"LinearSchedule",
"(",
"schedule_timesteps",
"=",
"int",
"(",
"exploration_fraction",
"*",
"total_timesteps",
")",
",",
"initial_p",
"=",
"1.0",
",",
"final_p",
"=",
"exploration_final_eps",
")",
"# Initialize the parameters and copy them to the target network.",
"U",
".",
"initialize",
"(",
")",
"update_target",
"(",
")",
"episode_rewards",
"=",
"[",
"0.0",
"]",
"saved_mean_reward",
"=",
"None",
"obs",
"=",
"env",
".",
"reset",
"(",
")",
"reset",
"=",
"True",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
")",
"as",
"td",
":",
"td",
"=",
"checkpoint_path",
"or",
"td",
"model_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"td",
",",
"\"model\"",
")",
"model_saved",
"=",
"False",
"if",
"tf",
".",
"train",
".",
"latest_checkpoint",
"(",
"td",
")",
"is",
"not",
"None",
":",
"load_variables",
"(",
"model_file",
")",
"logger",
".",
"log",
"(",
"'Loaded model from {}'",
".",
"format",
"(",
"model_file",
")",
")",
"model_saved",
"=",
"True",
"elif",
"load_path",
"is",
"not",
"None",
":",
"load_variables",
"(",
"load_path",
")",
"logger",
".",
"log",
"(",
"'Loaded model from {}'",
".",
"format",
"(",
"load_path",
")",
")",
"for",
"t",
"in",
"range",
"(",
"total_timesteps",
")",
":",
"if",
"callback",
"is",
"not",
"None",
":",
"if",
"callback",
"(",
"locals",
"(",
")",
",",
"globals",
"(",
")",
")",
":",
"break",
"# Take action and update exploration to the newest value",
"kwargs",
"=",
"{",
"}",
"if",
"not",
"param_noise",
":",
"update_eps",
"=",
"exploration",
".",
"value",
"(",
"t",
")",
"update_param_noise_threshold",
"=",
"0.",
"else",
":",
"update_eps",
"=",
"0.",
"# Compute the threshold such that the KL divergence between perturbed and non-perturbed",
"# policy is comparable to eps-greedy exploration with eps = exploration.value(t).",
"# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017",
"# for detailed explanation.",
"update_param_noise_threshold",
"=",
"-",
"np",
".",
"log",
"(",
"1.",
"-",
"exploration",
".",
"value",
"(",
"t",
")",
"+",
"exploration",
".",
"value",
"(",
"t",
")",
"/",
"float",
"(",
"env",
".",
"action_space",
".",
"n",
")",
")",
"kwargs",
"[",
"'reset'",
"]",
"=",
"reset",
"kwargs",
"[",
"'update_param_noise_threshold'",
"]",
"=",
"update_param_noise_threshold",
"kwargs",
"[",
"'update_param_noise_scale'",
"]",
"=",
"True",
"action",
"=",
"act",
"(",
"np",
".",
"array",
"(",
"obs",
")",
"[",
"None",
"]",
",",
"update_eps",
"=",
"update_eps",
",",
"*",
"*",
"kwargs",
")",
"[",
"0",
"]",
"env_action",
"=",
"action",
"reset",
"=",
"False",
"new_obs",
",",
"rew",
",",
"done",
",",
"_",
"=",
"env",
".",
"step",
"(",
"env_action",
")",
"# Store transition in the replay buffer.",
"replay_buffer",
".",
"add",
"(",
"obs",
",",
"action",
",",
"rew",
",",
"new_obs",
",",
"float",
"(",
"done",
")",
")",
"obs",
"=",
"new_obs",
"episode_rewards",
"[",
"-",
"1",
"]",
"+=",
"rew",
"if",
"done",
":",
"obs",
"=",
"env",
".",
"reset",
"(",
")",
"episode_rewards",
".",
"append",
"(",
"0.0",
")",
"reset",
"=",
"True",
"if",
"t",
">",
"learning_starts",
"and",
"t",
"%",
"train_freq",
"==",
"0",
":",
"# Minimize the error in Bellman's equation on a batch sampled from replay buffer.",
"if",
"prioritized_replay",
":",
"experience",
"=",
"replay_buffer",
".",
"sample",
"(",
"batch_size",
",",
"beta",
"=",
"beta_schedule",
".",
"value",
"(",
"t",
")",
")",
"(",
"obses_t",
",",
"actions",
",",
"rewards",
",",
"obses_tp1",
",",
"dones",
",",
"weights",
",",
"batch_idxes",
")",
"=",
"experience",
"else",
":",
"obses_t",
",",
"actions",
",",
"rewards",
",",
"obses_tp1",
",",
"dones",
"=",
"replay_buffer",
".",
"sample",
"(",
"batch_size",
")",
"weights",
",",
"batch_idxes",
"=",
"np",
".",
"ones_like",
"(",
"rewards",
")",
",",
"None",
"td_errors",
"=",
"train",
"(",
"obses_t",
",",
"actions",
",",
"rewards",
",",
"obses_tp1",
",",
"dones",
",",
"weights",
")",
"if",
"prioritized_replay",
":",
"new_priorities",
"=",
"np",
".",
"abs",
"(",
"td_errors",
")",
"+",
"prioritized_replay_eps",
"replay_buffer",
".",
"update_priorities",
"(",
"batch_idxes",
",",
"new_priorities",
")",
"if",
"t",
">",
"learning_starts",
"and",
"t",
"%",
"target_network_update_freq",
"==",
"0",
":",
"# Update target network periodically.",
"update_target",
"(",
")",
"mean_100ep_reward",
"=",
"round",
"(",
"np",
".",
"mean",
"(",
"episode_rewards",
"[",
"-",
"101",
":",
"-",
"1",
"]",
")",
",",
"1",
")",
"num_episodes",
"=",
"len",
"(",
"episode_rewards",
")",
"if",
"done",
"and",
"print_freq",
"is",
"not",
"None",
"and",
"len",
"(",
"episode_rewards",
")",
"%",
"print_freq",
"==",
"0",
":",
"logger",
".",
"record_tabular",
"(",
"\"steps\"",
",",
"t",
")",
"logger",
".",
"record_tabular",
"(",
"\"episodes\"",
",",
"num_episodes",
")",
"logger",
".",
"record_tabular",
"(",
"\"mean 100 episode reward\"",
",",
"mean_100ep_reward",
")",
"logger",
".",
"record_tabular",
"(",
"\"% time spent exploring\"",
",",
"int",
"(",
"100",
"*",
"exploration",
".",
"value",
"(",
"t",
")",
")",
")",
"logger",
".",
"dump_tabular",
"(",
")",
"if",
"(",
"checkpoint_freq",
"is",
"not",
"None",
"and",
"t",
">",
"learning_starts",
"and",
"num_episodes",
">",
"100",
"and",
"t",
"%",
"checkpoint_freq",
"==",
"0",
")",
":",
"if",
"saved_mean_reward",
"is",
"None",
"or",
"mean_100ep_reward",
">",
"saved_mean_reward",
":",
"if",
"print_freq",
"is",
"not",
"None",
":",
"logger",
".",
"log",
"(",
"\"Saving model due to mean reward increase: {} -> {}\"",
".",
"format",
"(",
"saved_mean_reward",
",",
"mean_100ep_reward",
")",
")",
"save_variables",
"(",
"model_file",
")",
"model_saved",
"=",
"True",
"saved_mean_reward",
"=",
"mean_100ep_reward",
"if",
"model_saved",
":",
"if",
"print_freq",
"is",
"not",
"None",
":",
"logger",
".",
"log",
"(",
"\"Restored model with mean reward: {}\"",
".",
"format",
"(",
"saved_mean_reward",
")",
")",
"load_variables",
"(",
"model_file",
")",
"return",
"act"
] | Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
network: string or a function
neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
(mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
seed: int or None
prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
lr: float
learning rate for adam optimizer
total_timesteps: int
number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None, it equals total_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
callback: (locals, globals) -> None
function called at every step with the state of the algorithm.
If the callback returns true, training stops.
load_path: str
path to load the model from. (default: None)
**network_kwargs
additional keyword arguments to pass to the network builder.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function. | [
"Train",
"a",
"deepq",
"model",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/deepq.py#L95-L333 | valid |
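The row above documents `learn` from baselines/deepq/deepq.py. A minimal usage sketch, assuming `learn` is exported from the `baselines.deepq` package (consistent with the file path shown) and using CartPole-v0 purely as an illustrative discrete-action environment:
import gym
from baselines import deepq

# Train a small MLP Q-network; the keyword arguments mirror the defaults
# documented in the docstring above.
env = gym.make("CartPole-v0")
act = deepq.learn(
    env,
    network="mlp",               # a registered model name from baselines.common.models
    lr=5e-4,
    total_timesteps=100000,
    buffer_size=50000,
    exploration_fraction=0.1,
    exploration_final_eps=0.02,
    print_freq=10,
)
act.save_act("cartpole_dqn.pkl")  # ActWrapper adds save/load support (next row)
env.close()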
openai/baselines | baselines/deepq/deepq.py | ActWrapper.save_act | def save_act(self, path=None):
"""Save model to a pickle located at `path`"""
if path is None:
path = os.path.join(logger.get_dir(), "model.pkl")
with tempfile.TemporaryDirectory() as td:
save_variables(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
cloudpickle.dump((model_data, self._act_params), f) | python | def save_act(self, path=None):
"""Save model to a pickle located at `path`"""
if path is None:
path = os.path.join(logger.get_dir(), "model.pkl")
with tempfile.TemporaryDirectory() as td:
save_variables(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
cloudpickle.dump((model_data, self._act_params), f) | [
"def",
"save_act",
"(",
"self",
",",
"path",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"logger",
".",
"get_dir",
"(",
")",
",",
"\"model.pkl\"",
")",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
")",
"as",
"td",
":",
"save_variables",
"(",
"os",
".",
"path",
".",
"join",
"(",
"td",
",",
"\"model\"",
")",
")",
"arc_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"td",
",",
"\"packed.zip\"",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"arc_name",
",",
"'w'",
")",
"as",
"zipf",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"td",
")",
":",
"for",
"fname",
"in",
"files",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"fname",
")",
"if",
"file_path",
"!=",
"arc_name",
":",
"zipf",
".",
"write",
"(",
"file_path",
",",
"os",
".",
"path",
".",
"relpath",
"(",
"file_path",
",",
"td",
")",
")",
"with",
"open",
"(",
"arc_name",
",",
"\"rb\"",
")",
"as",
"f",
":",
"model_data",
"=",
"f",
".",
"read",
"(",
")",
"with",
"open",
"(",
"path",
",",
"\"wb\"",
")",
"as",
"f",
":",
"cloudpickle",
".",
"dump",
"(",
"(",
"model_data",
",",
"self",
".",
"_act_params",
")",
",",
"f",
")"
] | Save model to a pickle located at `path` | [
"Save",
"model",
"to",
"a",
"pickle",
"located",
"at",
"path"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/deepq.py#L55-L72 | valid |
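`save_act` zips the current TensorFlow variables and cloudpickles them together with the act parameters. A minimal sketch, assuming the ActWrapper comes from `deepq.learn` as in the previous example; `total_timesteps=0` is used only so the graph is built without any training:
import gym
from baselines import deepq

# Build an (untrained) act function and persist it; the pickle contains the
# zipped variable files plus the act_params dict described above.
act = deepq.learn(gym.make("CartPole-v0"), network="mlp", total_timesteps=0)
act.save_act("/tmp/cartpole_dqn.pkl")
# If your version exposes it, deepq.load_act("/tmp/cartpole_dqn.pkl") restores
# the wrapper in a fresh process (an assumption; the loader is not shown in this row).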
openai/baselines | baselines/common/models.py | nature_cnn | def nature_cnn(unscaled_images, **conv_kwargs):
"""
CNN from Nature paper.
"""
scaled_images = tf.cast(unscaled_images, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),
**conv_kwargs))
h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
h3 = conv_to_fc(h3)
return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))) | python | def nature_cnn(unscaled_images, **conv_kwargs):
"""
CNN from Nature paper.
"""
scaled_images = tf.cast(unscaled_images, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),
**conv_kwargs))
h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
h3 = conv_to_fc(h3)
return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))) | [
"def",
"nature_cnn",
"(",
"unscaled_images",
",",
"*",
"*",
"conv_kwargs",
")",
":",
"scaled_images",
"=",
"tf",
".",
"cast",
"(",
"unscaled_images",
",",
"tf",
".",
"float32",
")",
"/",
"255.",
"activ",
"=",
"tf",
".",
"nn",
".",
"relu",
"h",
"=",
"activ",
"(",
"conv",
"(",
"scaled_images",
",",
"'c1'",
",",
"nf",
"=",
"32",
",",
"rf",
"=",
"8",
",",
"stride",
"=",
"4",
",",
"init_scale",
"=",
"np",
".",
"sqrt",
"(",
"2",
")",
",",
"*",
"*",
"conv_kwargs",
")",
")",
"h2",
"=",
"activ",
"(",
"conv",
"(",
"h",
",",
"'c2'",
",",
"nf",
"=",
"64",
",",
"rf",
"=",
"4",
",",
"stride",
"=",
"2",
",",
"init_scale",
"=",
"np",
".",
"sqrt",
"(",
"2",
")",
",",
"*",
"*",
"conv_kwargs",
")",
")",
"h3",
"=",
"activ",
"(",
"conv",
"(",
"h2",
",",
"'c3'",
",",
"nf",
"=",
"64",
",",
"rf",
"=",
"3",
",",
"stride",
"=",
"1",
",",
"init_scale",
"=",
"np",
".",
"sqrt",
"(",
"2",
")",
",",
"*",
"*",
"conv_kwargs",
")",
")",
"h3",
"=",
"conv_to_fc",
"(",
"h3",
")",
"return",
"activ",
"(",
"fc",
"(",
"h3",
",",
"'fc1'",
",",
"nh",
"=",
"512",
",",
"init_scale",
"=",
"np",
".",
"sqrt",
"(",
"2",
")",
")",
")"
] | CNN from Nature paper. | [
"CNN",
"from",
"Nature",
"paper",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L16-L27 | valid |
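`nature_cnn` maps a batch of unscaled image frames to a 512-unit embedding. A quick shape check, written against the TF1 graph API used throughout this file; the 84x84x4 Atari-style frame shape is an assumption for illustration:
import numpy as np
import tensorflow as tf
from baselines.common.models import nature_cnn

frames = tf.placeholder(tf.uint8, [None, 84, 84, 4])   # unscaled uint8 frames
latent = nature_cnn(frames)                             # conv stack + 512-unit fc layer

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(latent, {frames: np.zeros((2, 84, 84, 4), dtype=np.uint8)})
    print(out.shape)                                    # (2, 512)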
openai/baselines | baselines/common/models.py | mlp | def mlp(num_layers=2, num_hidden=64, activation=tf.tanh, layer_norm=False):
"""
Stack of fully-connected layers to be used in a policy / q-function approximator
Parameters:
----------
num_layers: int number of fully-connected layers (default: 2)
num_hidden: int size of fully-connected layers (default: 64)
activation: activation function (default: tf.tanh)
Returns:
-------
function that builds fully connected network with a given input tensor / placeholder
"""
def network_fn(X):
h = tf.layers.flatten(X)
for i in range(num_layers):
h = fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2))
if layer_norm:
h = tf.contrib.layers.layer_norm(h, center=True, scale=True)
h = activation(h)
return h
return network_fn | python | def mlp(num_layers=2, num_hidden=64, activation=tf.tanh, layer_norm=False):
"""
Stack of fully-connected layers to be used in a policy / q-function approximator
Parameters:
----------
num_layers: int number of fully-connected layers (default: 2)
num_hidden: int size of fully-connected layers (default: 64)
activation: activation function (default: tf.tanh)
Returns:
-------
function that builds fully connected network with a given input tensor / placeholder
"""
def network_fn(X):
h = tf.layers.flatten(X)
for i in range(num_layers):
h = fc(h, 'mlp_fc{}'.format(i), nh=num_hidden, init_scale=np.sqrt(2))
if layer_norm:
h = tf.contrib.layers.layer_norm(h, center=True, scale=True)
h = activation(h)
return h
return network_fn | [
"def",
"mlp",
"(",
"num_layers",
"=",
"2",
",",
"num_hidden",
"=",
"64",
",",
"activation",
"=",
"tf",
".",
"tanh",
",",
"layer_norm",
"=",
"False",
")",
":",
"def",
"network_fn",
"(",
"X",
")",
":",
"h",
"=",
"tf",
".",
"layers",
".",
"flatten",
"(",
"X",
")",
"for",
"i",
"in",
"range",
"(",
"num_layers",
")",
":",
"h",
"=",
"fc",
"(",
"h",
",",
"'mlp_fc{}'",
".",
"format",
"(",
"i",
")",
",",
"nh",
"=",
"num_hidden",
",",
"init_scale",
"=",
"np",
".",
"sqrt",
"(",
"2",
")",
")",
"if",
"layer_norm",
":",
"h",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"layer_norm",
"(",
"h",
",",
"center",
"=",
"True",
",",
"scale",
"=",
"True",
")",
"h",
"=",
"activation",
"(",
"h",
")",
"return",
"h",
"return",
"network_fn"
] | Stack of fully-connected layers to be used in a policy / q-function approximator
Parameters:
----------
num_layers: int number of fully-connected layers (default: 2)
num_hidden: int size of fully-connected layers (default: 64)
activation: activation function (default: tf.tanh)
Returns:
-------
function that builds fully connected network with a given input tensor / placeholder | [
"Stack",
"of",
"fully",
"-",
"connected",
"layers",
"to",
"be",
"used",
"in",
"a",
"policy",
"/",
"q",
"-",
"function",
"approximator"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L31-L59 | valid |
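Note that `mlp` returns a builder function rather than a tensor; the builder is applied to an input tensor afterwards. A sketch, with the 17-dimensional observation size chosen only for illustration:
import tensorflow as tf
from baselines.common.models import mlp

network_fn = mlp(num_layers=3, num_hidden=128, activation=tf.nn.relu)

obs_ph = tf.placeholder(tf.float32, [None, 17])  # e.g. a flat MuJoCo observation
latent = network_fn(obs_ph)                      # ReLU MLP output, shape (?, 128)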
openai/baselines | baselines/common/models.py | lstm | def lstm(nlstm=128, layer_norm=False):
"""
Builds LSTM (Long-Short Term Memory) network to be used in a policy.
Note that the resulting function returns not only the output of the LSTM
(i.e. hidden state of lstm for each step in the sequence), but also a dictionary
with auxiliary tensors to be set as policy attributes.
Specifically,
S is a placeholder to feed current state (LSTM state has to be managed outside policy)
M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too)
initial_state is a numpy array containing initial lstm state (usually zeros)
state is the output LSTM state (to be fed into S at the next call)
An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example
Parameters:
----------
nlstm: int LSTM hidden state size
layer_norm: bool if True, layer-normalized version of LSTM is used
Returns:
-------
function that builds LSTM with a given input tensor / placeholder
"""
def network_fn(X, nenv=1):
nbatch = X.shape[0]
nsteps = nbatch // nenv
h = tf.layers.flatten(X)
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states
xs = batch_to_seq(h, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
if layer_norm:
h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm)
else:
h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm)
h = seq_to_batch(h5)
initial_state = np.zeros(S.shape.as_list(), dtype=float)
return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}
return network_fn | python | def lstm(nlstm=128, layer_norm=False):
"""
Builds LSTM (Long-Short Term Memory) network to be used in a policy.
Note that the resulting function returns not only the output of the LSTM
(i.e. hidden state of lstm for each step in the sequence), but also a dictionary
with auxiliary tensors to be set as policy attributes.
Specifically,
S is a placeholder to feed current state (LSTM state has to be managed outside policy)
M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too)
initial_state is a numpy array containing initial lstm state (usually zeros)
state is the output LSTM state (to be fed into S at the next call)
An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example
Parameters:
----------
nlstm: int LSTM hidden state size
layer_norm: bool if True, layer-normalized version of LSTM is used
Returns:
-------
function that builds LSTM with a given input tensor / placeholder
"""
def network_fn(X, nenv=1):
nbatch = X.shape[0]
nsteps = nbatch // nenv
h = tf.layers.flatten(X)
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states
xs = batch_to_seq(h, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
if layer_norm:
h5, snew = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm)
else:
h5, snew = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm)
h = seq_to_batch(h5)
initial_state = np.zeros(S.shape.as_list(), dtype=float)
return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}
return network_fn | [
"def",
"lstm",
"(",
"nlstm",
"=",
"128",
",",
"layer_norm",
"=",
"False",
")",
":",
"def",
"network_fn",
"(",
"X",
",",
"nenv",
"=",
"1",
")",
":",
"nbatch",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"nsteps",
"=",
"nbatch",
"//",
"nenv",
"h",
"=",
"tf",
".",
"layers",
".",
"flatten",
"(",
"X",
")",
"M",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"[",
"nbatch",
"]",
")",
"#mask (done t-1)",
"S",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"[",
"nenv",
",",
"2",
"*",
"nlstm",
"]",
")",
"#states",
"xs",
"=",
"batch_to_seq",
"(",
"h",
",",
"nenv",
",",
"nsteps",
")",
"ms",
"=",
"batch_to_seq",
"(",
"M",
",",
"nenv",
",",
"nsteps",
")",
"if",
"layer_norm",
":",
"h5",
",",
"snew",
"=",
"utils",
".",
"lnlstm",
"(",
"xs",
",",
"ms",
",",
"S",
",",
"scope",
"=",
"'lnlstm'",
",",
"nh",
"=",
"nlstm",
")",
"else",
":",
"h5",
",",
"snew",
"=",
"utils",
".",
"lstm",
"(",
"xs",
",",
"ms",
",",
"S",
",",
"scope",
"=",
"'lstm'",
",",
"nh",
"=",
"nlstm",
")",
"h",
"=",
"seq_to_batch",
"(",
"h5",
")",
"initial_state",
"=",
"np",
".",
"zeros",
"(",
"S",
".",
"shape",
".",
"as_list",
"(",
")",
",",
"dtype",
"=",
"float",
")",
"return",
"h",
",",
"{",
"'S'",
":",
"S",
",",
"'M'",
":",
"M",
",",
"'state'",
":",
"snew",
",",
"'initial_state'",
":",
"initial_state",
"}",
"return",
"network_fn"
] | Builds LSTM (Long-Short Term Memory) network to be used in a policy.
Note that the resulting function returns not only the output of the LSTM
(i.e. hidden state of lstm for each step in the sequence), but also a dictionary
with auxiliary tensors to be set as policy attributes.
Specifically,
S is a placeholder to feed current state (LSTM state has to be managed outside policy)
M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too)
initial_state is a numpy array containing initial lstm state (usually zeros)
state is the output LSTM state (to be fed into S at the next call)
An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example
Parameters:
----------
nlstm: int LSTM hidden state size
layer_norm: bool if True, layer-normalized version of LSTM is used
Returns:
-------
function that builds LSTM with a given input tensor / placeholder | [
"Builds",
"LSTM",
"(",
"Long",
"-",
"Short",
"Term",
"Memory",
")",
"network",
"to",
"be",
"used",
"in",
"a",
"policy",
".",
"Note",
"that",
"the",
"resulting",
"function",
"returns",
"not",
"only",
"the",
"output",
"of",
"the",
"LSTM",
"(",
"i",
".",
"e",
".",
"hidden",
"state",
"of",
"lstm",
"for",
"each",
"step",
"in",
"the",
"sequence",
")",
"but",
"also",
"a",
"dictionary",
"with",
"auxiliary",
"tensors",
"to",
"be",
"set",
"as",
"policy",
"attributes",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L84-L135 | valid |
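Because the `lstm` builder also returns the recurrent extras (S, M, state, initial_state), the caller has to thread the LSTM state between batches. A sketch with illustrative sizes (4 environments, 8 steps, 10-dimensional observations):
import numpy as np
import tensorflow as tf
from baselines.common.models import lstm

nenv, nsteps, obs_dim = 4, 8, 10
network_fn = lstm(nlstm=64)

# The batch axis must equal nenv * nsteps so the input can be split into sequences.
X = tf.placeholder(tf.float32, [nenv * nsteps, obs_dim])
h, extra = network_fn(X, nenv=nenv)

feed = {
    X: np.zeros((nenv * nsteps, obs_dim), dtype=np.float32),
    extra['S']: extra['initial_state'],       # zero LSTM state to start
    extra['M']: np.zeros(nenv * nsteps),      # no episode boundaries in this batch
}
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out, new_state = sess.run([h, extra['state']], feed)
    # new_state is what would be fed back into extra['S'] on the next call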
openai/baselines | baselines/common/models.py | conv_only | def conv_only(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **conv_kwargs):
'''
convolutions-only net
Parameters:
----------
conv: list of triples (filter_number, filter_size, stride) specifying parameters for each layer.
Returns:
function that takes tensorflow tensor as input and returns the output of the last convolutional layer
'''
def network_fn(X):
out = tf.cast(X, tf.float32) / 255.
with tf.variable_scope("convnet"):
for num_outputs, kernel_size, stride in convs:
out = layers.convolution2d(out,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
activation_fn=tf.nn.relu,
**conv_kwargs)
return out
return network_fn | python | def conv_only(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **conv_kwargs):
'''
convolutions-only net
Parameters:
----------
conv: list of triples (filter_number, filter_size, stride) specifying parameters for each layer.
Returns:
function that takes tensorflow tensor as input and returns the output of the last convolutional layer
'''
def network_fn(X):
out = tf.cast(X, tf.float32) / 255.
with tf.variable_scope("convnet"):
for num_outputs, kernel_size, stride in convs:
out = layers.convolution2d(out,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
activation_fn=tf.nn.relu,
**conv_kwargs)
return out
return network_fn | [
"def",
"conv_only",
"(",
"convs",
"=",
"[",
"(",
"32",
",",
"8",
",",
"4",
")",
",",
"(",
"64",
",",
"4",
",",
"2",
")",
",",
"(",
"64",
",",
"3",
",",
"1",
")",
"]",
",",
"*",
"*",
"conv_kwargs",
")",
":",
"def",
"network_fn",
"(",
"X",
")",
":",
"out",
"=",
"tf",
".",
"cast",
"(",
"X",
",",
"tf",
".",
"float32",
")",
"/",
"255.",
"with",
"tf",
".",
"variable_scope",
"(",
"\"convnet\"",
")",
":",
"for",
"num_outputs",
",",
"kernel_size",
",",
"stride",
"in",
"convs",
":",
"out",
"=",
"layers",
".",
"convolution2d",
"(",
"out",
",",
"num_outputs",
"=",
"num_outputs",
",",
"kernel_size",
"=",
"kernel_size",
",",
"stride",
"=",
"stride",
",",
"activation_fn",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"*",
"*",
"conv_kwargs",
")",
"return",
"out",
"return",
"network_fn"
] | convolutions-only net
Parameters:
----------
conv: list of triples (filter_number, filter_size, stride) specifying parameters for each layer.
Returns:
function that takes tensorflow tensor as input and returns the output of the last convolutional layer | [
"convolutions",
"-",
"only",
"net"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L171-L198 | valid |
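`conv_only` builds just a convolutional trunk with a caller-chosen layer spec and no final fully connected layer. A sketch with a smaller spec than the default:
import tensorflow as tf
from baselines.common.models import conv_only

network_fn = conv_only(convs=[(16, 8, 4), (32, 4, 2)])   # (filters, kernel, stride)

frames = tf.placeholder(tf.uint8, [None, 84, 84, 4])
feature_map = network_fn(frames)   # output of the last conv layer: (batch, h', w', 32)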
openai/baselines | baselines/common/models.py | get_network_builder | def get_network_builder(name):
"""
If you want to register your own network outside models.py, you just need:
Usage Example:
-------------
from baselines.common.models import register
@register("your_network_name")
def your_network_define(**net_kwargs):
...
return network_fn
"""
if callable(name):
return name
elif name in mapping:
return mapping[name]
else:
raise ValueError('Unknown network type: {}'.format(name)) | python | def get_network_builder(name):
"""
If you want to register your own network outside models.py, you just need:
Usage Example:
-------------
from baselines.common.models import register
@register("your_network_name")
def your_network_define(**net_kwargs):
...
return network_fn
"""
if callable(name):
return name
elif name in mapping:
return mapping[name]
else:
raise ValueError('Unknown network type: {}'.format(name)) | [
"def",
"get_network_builder",
"(",
"name",
")",
":",
"if",
"callable",
"(",
"name",
")",
":",
"return",
"name",
"elif",
"name",
"in",
"mapping",
":",
"return",
"mapping",
"[",
"name",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown network type: {}'",
".",
"format",
"(",
"name",
")",
")"
] | If you want to register your own network outside models.py, you just need:
Usage Example:
-------------
from baselines.common.models import register
@register("your_network_name")
def your_network_define(**net_kwargs):
...
return network_fn | [
"If",
"you",
"want",
"to",
"register",
"your",
"own",
"network",
"outside",
"models",
".",
"py",
"you",
"just",
"need",
":"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/models.py#L206-L224 | valid |
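The docstring above shows the @register pattern; the sketch below registers a hypothetical network called tiny_mlp and then resolves it by name through get_network_builder:
import tensorflow as tf
from baselines.common.models import register, get_network_builder

@register("tiny_mlp")                 # hypothetical name, for illustration only
def tiny_mlp(**net_kwargs):
    def network_fn(X):
        h = tf.layers.flatten(X)
        return tf.layers.dense(h, 32, activation=tf.nn.relu)
    return network_fn

builder = get_network_builder("tiny_mlp")   # looked up in `mapping`
network_fn = builder()                      # same builder pattern as mlp/cnn above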
openai/baselines | baselines/deepq/models.py | mlp | def mlp(hiddens=[], layer_norm=False):
"""This model takes as input an observation and returns values of all actions.
Parameters
----------
hiddens: [int]
list of sizes of hidden layers
layer_norm: bool
if true applies layer normalization for every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm.
"""
return lambda *args, **kwargs: _mlp(hiddens, layer_norm=layer_norm, *args, **kwargs) | python | def mlp(hiddens=[], layer_norm=False):
"""This model takes as input an observation and returns values of all actions.
Parameters
----------
hiddens: [int]
list of sizes of hidden layers
layer_norm: bool
if true applies layer normalization for every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm.
"""
return lambda *args, **kwargs: _mlp(hiddens, layer_norm=layer_norm, *args, **kwargs) | [
"def",
"mlp",
"(",
"hiddens",
"=",
"[",
"]",
",",
"layer_norm",
"=",
"False",
")",
":",
"return",
"lambda",
"*",
"args",
",",
"*",
"*",
"kwargs",
":",
"_mlp",
"(",
"hiddens",
",",
"layer_norm",
"=",
"layer_norm",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | This model takes as input an observation and returns values of all actions.
Parameters
----------
hiddens: [int]
list of sizes of hidden layers
layer_norm: bool
if true applies layer normalization for every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm. | [
"This",
"model",
"takes",
"as",
"input",
"an",
"observation",
"and",
"returns",
"values",
"of",
"all",
"actions",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/models.py#L17-L33 | valid |
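This deepq-specific `mlp` returns a q_func rather than a latent builder. A sketch of constructing and calling it directly, assuming the (input, num_actions, scope) calling convention that deepq's q-functions use (the underlying _mlp is not shown in this row):
import tensorflow as tf
from baselines.deepq import models

# Q-network with two 64-unit hidden layers and layer normalization.
q_func = models.mlp(hiddens=[64, 64], layer_norm=True)

obs_ph = tf.placeholder(tf.float32, [None, 4])
q_values = q_func(obs_ph, 2, scope="deepq_model")   # assumed signature; shape (?, 2)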
openai/baselines | baselines/deepq/models.py | cnn_to_mlp | def cnn_to_mlp(convs, hiddens, dueling=False, layer_norm=False):
"""This model takes as input an observation and returns values of all actions.
Parameters
----------
convs: [(int, int, int)]
list of convolutional layers in form of
(num_outputs, kernel_size, stride)
hiddens: [int]
list of sizes of hidden layers
dueling: bool
if true double the output MLP to compute a baseline
for action scores
layer_norm: bool
if true applies layer normalization for every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm.
"""
return lambda *args, **kwargs: _cnn_to_mlp(convs, hiddens, dueling, layer_norm=layer_norm, *args, **kwargs) | python | def cnn_to_mlp(convs, hiddens, dueling=False, layer_norm=False):
"""This model takes as input an observation and returns values of all actions.
Parameters
----------
convs: [(int, int, int)]
list of convolutional layers in form of
(num_outputs, kernel_size, stride)
hiddens: [int]
list of sizes of hidden layers
dueling: bool
if true double the output MLP to compute a baseline
for action scores
layer_norm: bool
if true applies layer normalization for every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm.
"""
return lambda *args, **kwargs: _cnn_to_mlp(convs, hiddens, dueling, layer_norm=layer_norm, *args, **kwargs) | [
"def",
"cnn_to_mlp",
"(",
"convs",
",",
"hiddens",
",",
"dueling",
"=",
"False",
",",
"layer_norm",
"=",
"False",
")",
":",
"return",
"lambda",
"*",
"args",
",",
"*",
"*",
"kwargs",
":",
"_cnn_to_mlp",
"(",
"convs",
",",
"hiddens",
",",
"dueling",
",",
"layer_norm",
"=",
"layer_norm",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | This model takes as input an observation and returns values of all actions.
Parameters
----------
convs: [(int, int, int)]
list of convolutional layers in form of
(num_outputs, kernel_size, stride)
hiddens: [int]
list of sizes of hidden layers
dueling: bool
if true double the output MLP to compute a baseline
for action scores
layer_norm: bool
if true applies layer normalization for every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm. | [
"This",
"model",
"takes",
"as",
"input",
"an",
"observation",
"and",
"returns",
"values",
"of",
"all",
"actions",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/models.py#L73-L96 | valid |
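`cnn_to_mlp` chains a convolutional trunk and an MLP head, optionally with a dueling decomposition. A sketch using the Nature-DQN layer spec as an example:
from baselines.deepq import models

q_func = models.cnn_to_mlp(
    convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],   # (num_outputs, kernel_size, stride)
    hiddens=[256],
    dueling=True,                                  # separate value/advantage streams
)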
openai/baselines | baselines/common/cmd_util.py | make_vec_env | def make_vec_env(env_id, env_type, num_env, seed,
wrapper_kwargs=None,
start_index=0,
reward_scale=1.0,
flatten_dict_observations=True,
gamestate=None):
"""
Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.
"""
wrapper_kwargs = wrapper_kwargs or {}
mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
seed = seed + 10000 * mpi_rank if seed is not None else None
logger_dir = logger.get_dir()
def make_thunk(rank):
return lambda: make_env(
env_id=env_id,
env_type=env_type,
mpi_rank=mpi_rank,
subrank=rank,
seed=seed,
reward_scale=reward_scale,
gamestate=gamestate,
flatten_dict_observations=flatten_dict_observations,
wrapper_kwargs=wrapper_kwargs,
logger_dir=logger_dir
)
set_global_seeds(seed)
if num_env > 1:
return SubprocVecEnv([make_thunk(i + start_index) for i in range(num_env)])
else:
return DummyVecEnv([make_thunk(start_index)]) | python | def make_vec_env(env_id, env_type, num_env, seed,
wrapper_kwargs=None,
start_index=0,
reward_scale=1.0,
flatten_dict_observations=True,
gamestate=None):
"""
Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.
"""
wrapper_kwargs = wrapper_kwargs or {}
mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
seed = seed + 10000 * mpi_rank if seed is not None else None
logger_dir = logger.get_dir()
def make_thunk(rank):
return lambda: make_env(
env_id=env_id,
env_type=env_type,
mpi_rank=mpi_rank,
subrank=rank,
seed=seed,
reward_scale=reward_scale,
gamestate=gamestate,
flatten_dict_observations=flatten_dict_observations,
wrapper_kwargs=wrapper_kwargs,
logger_dir=logger_dir
)
set_global_seeds(seed)
if num_env > 1:
return SubprocVecEnv([make_thunk(i + start_index) for i in range(num_env)])
else:
return DummyVecEnv([make_thunk(start_index)]) | [
"def",
"make_vec_env",
"(",
"env_id",
",",
"env_type",
",",
"num_env",
",",
"seed",
",",
"wrapper_kwargs",
"=",
"None",
",",
"start_index",
"=",
"0",
",",
"reward_scale",
"=",
"1.0",
",",
"flatten_dict_observations",
"=",
"True",
",",
"gamestate",
"=",
"None",
")",
":",
"wrapper_kwargs",
"=",
"wrapper_kwargs",
"or",
"{",
"}",
"mpi_rank",
"=",
"MPI",
".",
"COMM_WORLD",
".",
"Get_rank",
"(",
")",
"if",
"MPI",
"else",
"0",
"seed",
"=",
"seed",
"+",
"10000",
"*",
"mpi_rank",
"if",
"seed",
"is",
"not",
"None",
"else",
"None",
"logger_dir",
"=",
"logger",
".",
"get_dir",
"(",
")",
"def",
"make_thunk",
"(",
"rank",
")",
":",
"return",
"lambda",
":",
"make_env",
"(",
"env_id",
"=",
"env_id",
",",
"env_type",
"=",
"env_type",
",",
"mpi_rank",
"=",
"mpi_rank",
",",
"subrank",
"=",
"rank",
",",
"seed",
"=",
"seed",
",",
"reward_scale",
"=",
"reward_scale",
",",
"gamestate",
"=",
"gamestate",
",",
"flatten_dict_observations",
"=",
"flatten_dict_observations",
",",
"wrapper_kwargs",
"=",
"wrapper_kwargs",
",",
"logger_dir",
"=",
"logger_dir",
")",
"set_global_seeds",
"(",
"seed",
")",
"if",
"num_env",
">",
"1",
":",
"return",
"SubprocVecEnv",
"(",
"[",
"make_thunk",
"(",
"i",
"+",
"start_index",
")",
"for",
"i",
"in",
"range",
"(",
"num_env",
")",
"]",
")",
"else",
":",
"return",
"DummyVecEnv",
"(",
"[",
"make_thunk",
"(",
"start_index",
")",
"]",
")"
] | Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo. | [
"Create",
"a",
"wrapped",
"monitored",
"SubprocVecEnv",
"for",
"Atari",
"and",
"MuJoCo",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L21-L52 | valid |
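A sketch of building a vectorized Atari environment with make_vec_env; the environment id is illustrative:
from baselines.common.cmd_util import make_vec_env

# Eight emulator copies in subprocesses; the seed is offset per MPI rank and
# per sub-environment inside make_vec_env / make_env.
venv = make_vec_env(
    env_id="PongNoFrameskip-v4",
    env_type="atari",
    num_env=8,
    seed=0,
    reward_scale=1.0,
)
obs = venv.reset()   # one stacked observation per environment copy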
openai/baselines | baselines/common/cmd_util.py | make_mujoco_env | def make_mujoco_env(env_id, seed, reward_scale=1.0):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
rank = MPI.COMM_WORLD.Get_rank()
myseed = seed + 1000 * rank if seed is not None else None
set_global_seeds(myseed)
env = gym.make(env_id)
logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))
env = Monitor(env, logger_path, allow_early_resets=True)
env.seed(seed)
if reward_scale != 1.0:
from baselines.common.retro_wrappers import RewardScaler
env = RewardScaler(env, reward_scale)
return env | python | def make_mujoco_env(env_id, seed, reward_scale=1.0):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
rank = MPI.COMM_WORLD.Get_rank()
myseed = seed + 1000 * rank if seed is not None else None
set_global_seeds(myseed)
env = gym.make(env_id)
logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))
env = Monitor(env, logger_path, allow_early_resets=True)
env.seed(seed)
if reward_scale != 1.0:
from baselines.common.retro_wrappers import RewardScaler
env = RewardScaler(env, reward_scale)
return env | [
"def",
"make_mujoco_env",
"(",
"env_id",
",",
"seed",
",",
"reward_scale",
"=",
"1.0",
")",
":",
"rank",
"=",
"MPI",
".",
"COMM_WORLD",
".",
"Get_rank",
"(",
")",
"myseed",
"=",
"seed",
"+",
"1000",
"*",
"rank",
"if",
"seed",
"is",
"not",
"None",
"else",
"None",
"set_global_seeds",
"(",
"myseed",
")",
"env",
"=",
"gym",
".",
"make",
"(",
"env_id",
")",
"logger_path",
"=",
"None",
"if",
"logger",
".",
"get_dir",
"(",
")",
"is",
"None",
"else",
"os",
".",
"path",
".",
"join",
"(",
"logger",
".",
"get_dir",
"(",
")",
",",
"str",
"(",
"rank",
")",
")",
"env",
"=",
"Monitor",
"(",
"env",
",",
"logger_path",
",",
"allow_early_resets",
"=",
"True",
")",
"env",
".",
"seed",
"(",
"seed",
")",
"if",
"reward_scale",
"!=",
"1.0",
":",
"from",
"baselines",
".",
"common",
".",
"retro_wrappers",
"import",
"RewardScaler",
"env",
"=",
"RewardScaler",
"(",
"env",
",",
"reward_scale",
")",
"return",
"env"
] | Create a wrapped, monitored gym.Env for MuJoCo. | [
"Create",
"a",
"wrapped",
"monitored",
"gym",
".",
"Env",
"for",
"MuJoCo",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L88-L102 | valid |
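A short usage sketch for the helper above (assumes mujoco-py and mpi4py are installed, since the helper queries MPI.COMM_WORLD directly; 'Hopper-v2' is only an illustrative environment id):

from baselines.common.cmd_util import make_mujoco_env

env = make_mujoco_env('Hopper-v2', seed=0, reward_scale=1.0)
ob = env.reset()
ob, reward, done, info = env.step(env.action_space.sample())
env.close()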
openai/baselines | baselines/common/cmd_util.py | make_robotics_env | def make_robotics_env(env_id, seed, rank=0):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
set_global_seeds(seed)
env = gym.make(env_id)
env = FlattenDictWrapper(env, ['observation', 'desired_goal'])
env = Monitor(
env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)),
info_keywords=('is_success',))
env.seed(seed)
return env | python | def make_robotics_env(env_id, seed, rank=0):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
set_global_seeds(seed)
env = gym.make(env_id)
env = FlattenDictWrapper(env, ['observation', 'desired_goal'])
env = Monitor(
env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)),
info_keywords=('is_success',))
env.seed(seed)
return env | [
"def",
"make_robotics_env",
"(",
"env_id",
",",
"seed",
",",
"rank",
"=",
"0",
")",
":",
"set_global_seeds",
"(",
"seed",
")",
"env",
"=",
"gym",
".",
"make",
"(",
"env_id",
")",
"env",
"=",
"FlattenDictWrapper",
"(",
"env",
",",
"[",
"'observation'",
",",
"'desired_goal'",
"]",
")",
"env",
"=",
"Monitor",
"(",
"env",
",",
"logger",
".",
"get_dir",
"(",
")",
"and",
"os",
".",
"path",
".",
"join",
"(",
"logger",
".",
"get_dir",
"(",
")",
",",
"str",
"(",
"rank",
")",
")",
",",
"info_keywords",
"=",
"(",
"'is_success'",
",",
")",
")",
"env",
".",
"seed",
"(",
"seed",
")",
"return",
"env"
] | Create a wrapped, monitored gym.Env for MuJoCo. | [
"Create",
"a",
"wrapped",
"monitored",
"gym",
".",
"Env",
"for",
"MuJoCo",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L104-L115 | valid |
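A short usage sketch for the helper above (assumes gym's robotics extras and MuJoCo are installed; the 'FetchReach-v1' id is an assumption and may differ by gym version):

from baselines.common.cmd_util import make_robotics_env

env = make_robotics_env('FetchReach-v1', seed=0, rank=0)
ob = env.reset()                              # flat vector: observation followed by desired_goal
ob, reward, done, info = env.step(env.action_space.sample())
print(info.get('is_success'))                 # also recorded by the Monitor at episode end (info_keywords)
env.close()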
openai/baselines | baselines/common/cmd_util.py | common_arg_parser | def common_arg_parser():
"""
Create an argparse.ArgumentParser for run_mujoco.py.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--env_type', help='type of environment, used when the environment type cannot be automatically determined', type=str)
parser.add_argument('--seed', help='RNG seed', type=int, default=None)
parser.add_argument('--alg', help='Algorithm', type=str, default='ppo2')
parser.add_argument('--num_timesteps', type=float, default=1e6),
parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default=None)
parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None)
parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int)
parser.add_argument('--reward_scale', help='Reward scale factor. Default: 1.0', default=1.0, type=float)
parser.add_argument('--save_path', help='Path to save trained model to', default=None, type=str)
parser.add_argument('--save_video_interval', help='Save video every x steps (0 = disabled)', default=0, type=int)
parser.add_argument('--save_video_length', help='Length of recorded video. Default: 200', default=200, type=int)
parser.add_argument('--play', default=False, action='store_true')
return parser | python | def common_arg_parser():
"""
Create an argparse.ArgumentParser for run_mujoco.py.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--env_type', help='type of environment, used when the environment type cannot be automatically determined', type=str)
parser.add_argument('--seed', help='RNG seed', type=int, default=None)
parser.add_argument('--alg', help='Algorithm', type=str, default='ppo2')
parser.add_argument('--num_timesteps', type=float, default=1e6),
parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default=None)
parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None)
parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int)
parser.add_argument('--reward_scale', help='Reward scale factor. Default: 1.0', default=1.0, type=float)
parser.add_argument('--save_path', help='Path to save trained model to', default=None, type=str)
parser.add_argument('--save_video_interval', help='Save video every x steps (0 = disabled)', default=0, type=int)
parser.add_argument('--save_video_length', help='Length of recorded video. Default: 200', default=200, type=int)
parser.add_argument('--play', default=False, action='store_true')
return parser | [
"def",
"common_arg_parser",
"(",
")",
":",
"parser",
"=",
"arg_parser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--env'",
",",
"help",
"=",
"'environment ID'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'Reacher-v2'",
")",
"parser",
".",
"add_argument",
"(",
"'--env_type'",
",",
"help",
"=",
"'type of environment, used when the environment type cannot be automatically determined'",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"'--seed'",
",",
"help",
"=",
"'RNG seed'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"None",
")",
"parser",
".",
"add_argument",
"(",
"'--alg'",
",",
"help",
"=",
"'Algorithm'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'ppo2'",
")",
"parser",
".",
"add_argument",
"(",
"'--num_timesteps'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"1e6",
")",
",",
"parser",
".",
"add_argument",
"(",
"'--network'",
",",
"help",
"=",
"'network type (mlp, cnn, lstm, cnn_lstm, conv_only)'",
",",
"default",
"=",
"None",
")",
"parser",
".",
"add_argument",
"(",
"'--gamestate'",
",",
"help",
"=",
"'game state to load (so far only used in retro games)'",
",",
"default",
"=",
"None",
")",
"parser",
".",
"add_argument",
"(",
"'--num_env'",
",",
"help",
"=",
"'Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco'",
",",
"default",
"=",
"None",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--reward_scale'",
",",
"help",
"=",
"'Reward scale factor. Default: 1.0'",
",",
"default",
"=",
"1.0",
",",
"type",
"=",
"float",
")",
"parser",
".",
"add_argument",
"(",
"'--save_path'",
",",
"help",
"=",
"'Path to save trained model to'",
",",
"default",
"=",
"None",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"'--save_video_interval'",
",",
"help",
"=",
"'Save video every x steps (0 = disabled)'",
",",
"default",
"=",
"0",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--save_video_length'",
",",
"help",
"=",
"'Length of recorded video. Default: 200'",
",",
"default",
"=",
"200",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--play'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"'store_true'",
")",
"return",
"parser"
] | Create an argparse.ArgumentParser for run_mujoco.py. | [
"Create",
"an",
"argparse",
".",
"ArgumentParser",
"for",
"run_mujoco",
".",
"py",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L135-L153 | valid |
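A sketch of how a run script can consume this parser; flags the parser does not know about (here the hypothetical '--num_layers') come back in unknown_args and can be forwarded to the network builder:

from baselines.common.cmd_util import common_arg_parser

parser = common_arg_parser()
args, unknown_args = parser.parse_known_args(
    ['--env', 'PongNoFrameskip-v4', '--alg', 'ppo2',
     '--num_timesteps', '1e7', '--num_layers', '3'])
print(args.env, args.alg, int(args.num_timesteps))   # PongNoFrameskip-v4 ppo2 10000000
print(unknown_args)                                  # ['--num_layers', '3']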
openai/baselines | baselines/common/cmd_util.py | robotics_arg_parser | def robotics_arg_parser():
"""
Create an argparse.ArgumentParser for run_mujoco.py.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=None)
parser.add_argument('--num-timesteps', type=int, default=int(1e6))
return parser | python | def robotics_arg_parser():
"""
Create an argparse.ArgumentParser for run_mujoco.py.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=None)
parser.add_argument('--num-timesteps', type=int, default=int(1e6))
return parser | [
"def",
"robotics_arg_parser",
"(",
")",
":",
"parser",
"=",
"arg_parser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--env'",
",",
"help",
"=",
"'environment ID'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'FetchReach-v0'",
")",
"parser",
".",
"add_argument",
"(",
"'--seed'",
",",
"help",
"=",
"'RNG seed'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"None",
")",
"parser",
".",
"add_argument",
"(",
"'--num-timesteps'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"int",
"(",
"1e6",
")",
")",
"return",
"parser"
] | Create an argparse.ArgumentParser for run_mujoco.py. | [
"Create",
"an",
"argparse",
".",
"ArgumentParser",
"for",
"run_mujoco",
".",
"py",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L155-L163 | valid |
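Usage is analogous for the robotics parser (the environment id shown is only an example):

from baselines.common.cmd_util import robotics_arg_parser

args = robotics_arg_parser().parse_args(['--env', 'FetchPush-v1', '--num-timesteps', '500000'])
print(args.env, args.num_timesteps)    # FetchPush-v1 500000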
openai/baselines | baselines/common/cmd_util.py | parse_unknown_args | def parse_unknown_args(args):
"""
Parse arguments not consumed by arg parser into a dictionary
"""
retval = {}
preceded_by_key = False
for arg in args:
if arg.startswith('--'):
if '=' in arg:
key = arg.split('=')[0][2:]
value = arg.split('=')[1]
retval[key] = value
else:
key = arg[2:]
preceded_by_key = True
elif preceded_by_key:
retval[key] = arg
preceded_by_key = False
return retval | python | def parse_unknown_args(args):
"""
Parse arguments not consumed by arg parser into a dictionary
"""
retval = {}
preceded_by_key = False
for arg in args:
if arg.startswith('--'):
if '=' in arg:
key = arg.split('=')[0][2:]
value = arg.split('=')[1]
retval[key] = value
else:
key = arg[2:]
preceded_by_key = True
elif preceded_by_key:
retval[key] = arg
preceded_by_key = False
return retval | [
"def",
"parse_unknown_args",
"(",
"args",
")",
":",
"retval",
"=",
"{",
"}",
"preceded_by_key",
"=",
"False",
"for",
"arg",
"in",
"args",
":",
"if",
"arg",
".",
"startswith",
"(",
"'--'",
")",
":",
"if",
"'='",
"in",
"arg",
":",
"key",
"=",
"arg",
".",
"split",
"(",
"'='",
")",
"[",
"0",
"]",
"[",
"2",
":",
"]",
"value",
"=",
"arg",
".",
"split",
"(",
"'='",
")",
"[",
"1",
"]",
"retval",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"key",
"=",
"arg",
"[",
"2",
":",
"]",
"preceded_by_key",
"=",
"True",
"elif",
"preceded_by_key",
":",
"retval",
"[",
"key",
"]",
"=",
"arg",
"preceded_by_key",
"=",
"False",
"return",
"retval"
] | Parse arguments not consumed by arg parser into a dictionary | [
"Parse",
"arguments",
"not",
"consumed",
"by",
"arg",
"parser",
"into",
"a",
"dicitonary"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L166-L185 | valid |
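A worked example for the parser above; note that both the '--key=value' and '--key value' forms are accepted and that all values come back as strings:

from baselines.common.cmd_util import parse_unknown_args

extra = parse_unknown_args(['--lr=3e-4', '--num_layers', '2', '--value_network', 'copy'])
print(extra)   # {'lr': '3e-4', 'num_layers': '2', 'value_network': 'copy'}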
openai/baselines | baselines/common/vec_env/vec_env.py | clear_mpi_env_vars | def clear_mpi_env_vars():
"""
from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang.
This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing
Processes.
"""
removed_environment = {}
for k, v in list(os.environ.items()):
for prefix in ['OMPI_', 'PMI_']:
if k.startswith(prefix):
removed_environment[k] = v
del os.environ[k]
try:
yield
finally:
os.environ.update(removed_environment) | python | def clear_mpi_env_vars():
"""
from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang.
This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing
Processes.
"""
removed_environment = {}
for k, v in list(os.environ.items()):
for prefix in ['OMPI_', 'PMI_']:
if k.startswith(prefix):
removed_environment[k] = v
del os.environ[k]
try:
yield
finally:
os.environ.update(removed_environment) | [
"def",
"clear_mpi_env_vars",
"(",
")",
":",
"removed_environment",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"os",
".",
"environ",
".",
"items",
"(",
")",
")",
":",
"for",
"prefix",
"in",
"[",
"'OMPI_'",
",",
"'PMI_'",
"]",
":",
"if",
"k",
".",
"startswith",
"(",
"prefix",
")",
":",
"removed_environment",
"[",
"k",
"]",
"=",
"v",
"del",
"os",
".",
"environ",
"[",
"k",
"]",
"try",
":",
"yield",
"finally",
":",
"os",
".",
"environ",
".",
"update",
"(",
"removed_environment",
")"
] | from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang.
This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing
Processes. | [
"from",
"mpi4py",
"import",
"MPI",
"will",
"call",
"MPI_Init",
"by",
"default",
".",
"If",
"the",
"child",
"process",
"has",
"MPI",
"environment",
"variables",
"MPI",
"will",
"think",
"that",
"the",
"child",
"process",
"is",
"an",
"MPI",
"process",
"just",
"like",
"the",
"parent",
"and",
"do",
"bad",
"things",
"such",
"as",
"hang",
".",
"This",
"context",
"manager",
"is",
"a",
"hacky",
"way",
"to",
"clear",
"those",
"environment",
"variables",
"temporarily",
"such",
"as",
"when",
"we",
"are",
"starting",
"multiprocessing",
"Processes",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/vec_env/vec_env.py#L204-L219 | valid |
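A minimal sketch of the intended use: wrap the start of a worker process so that it does not inherit OMPI_*/PMI_* variables and therefore does not try to join the parent's MPI job when it imports mpi4py:

import multiprocessing as mp
from baselines.common.vec_env.vec_env import clear_mpi_env_vars

def worker():
    import os
    # Neither OMPI_* nor PMI_* variables were inherited from the parent.
    assert not any(k.startswith(('OMPI_', 'PMI_')) for k in os.environ)

if __name__ == '__main__':
    with clear_mpi_env_vars():
        p = mp.Process(target=worker)
        p.start()
    p.join()   # the parent's MPI variables are restored as soon as the with-block exits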
openai/baselines | baselines/ppo2/ppo2.py | learn | def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, load_path=None, model_fn=None, **network_kwargs):
'''
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See common/models.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
log_interval: int number of updates between logging events
nminibatches: int number of training minibatches per update. For recurrent policies,
should be smaller than or equal to the number of environments run in parallel.
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int number of updates between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
policy = build_policy(env, network, **network_kwargs)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
# Instantiate the model object (that creates act_model and train_model)
if model_fn is None:
from baselines.ppo2.model import Model
model_fn = Model
model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
if eval_env is not None:
eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam)
epinfobuf = deque(maxlen=100)
if eval_env is not None:
eval_epinfobuf = deque(maxlen=100)
# Start total timer
tfirststart = time.perf_counter()
nupdates = total_timesteps//nbatch
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
# Start timer
tstart = time.perf_counter()
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
# Calculate the cliprange
cliprangenow = cliprange(frac)
# Get minibatch
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
if eval_env is not None:
eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632
epinfobuf.extend(epinfos)
if eval_env is not None:
eval_epinfobuf.extend(eval_epinfos)
# Here what we're going to do is for each minibatch calculate the loss and append it.
mblossvals = []
if states is None: # nonrecurrent version
# Index of each element of batch_size
# Create the indices array
inds = np.arange(nbatch)
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else: # recurrent version
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
# Feedforward --> get losses --> update
lossvals = np.mean(mblossvals, axis=0)
# End timer
tnow = time.perf_counter()
# Calculate the fps (frame per second)
fps = int(nbatch / (tnow - tstart))
if update % log_interval == 0 or update == 1:
# Calculates whether the value function is a good predictor of the returns (ev close to 1)
# or if it's just worse than predicting nothing (ev <= 0)
ev = explained_variance(values, returns)
logger.logkv("serial_timesteps", update*nsteps)
logger.logkv("nupdates", update)
logger.logkv("total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
if eval_env is not None:
logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) )
logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) )
logger.logkv('time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv(lossname, lossval)
if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
logger.dumpkvs()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and (MPI is None or MPI.COMM_WORLD.Get_rank() == 0):
checkdir = osp.join(logger.get_dir(), 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, '%.5i'%update)
print('Saving to', savepath)
model.save(savepath)
return model | python | def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, load_path=None, model_fn=None, **network_kwargs):
'''
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See common/models.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
log_interval: int number of updates between logging events
nminibatches: int number of training minibatches per update. For recurrent policies,
should be smaller than or equal to the number of environments run in parallel.
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int number of updates between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
policy = build_policy(env, network, **network_kwargs)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
# Instantiate the model object (that creates act_model and train_model)
if model_fn is None:
from baselines.ppo2.model import Model
model_fn = Model
model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
if eval_env is not None:
eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam)
epinfobuf = deque(maxlen=100)
if eval_env is not None:
eval_epinfobuf = deque(maxlen=100)
# Start total timer
tfirststart = time.perf_counter()
nupdates = total_timesteps//nbatch
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
# Start timer
tstart = time.perf_counter()
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
# Calculate the cliprange
cliprangenow = cliprange(frac)
# Get minibatch
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
if eval_env is not None:
eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632
epinfobuf.extend(epinfos)
if eval_env is not None:
eval_epinfobuf.extend(eval_epinfos)
# Here what we're going to do is for each minibatch calculate the loss and append it.
mblossvals = []
if states is None: # nonrecurrent version
# Index of each element of batch_size
# Create the indices array
inds = np.arange(nbatch)
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else: # recurrent version
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
# Feedforward --> get losses --> update
lossvals = np.mean(mblossvals, axis=0)
# End timer
tnow = time.perf_counter()
# Calculate the fps (frame per second)
fps = int(nbatch / (tnow - tstart))
if update % log_interval == 0 or update == 1:
# Calculates whether the value function is a good predictor of the returns (ev close to 1)
# or if it's just worse than predicting nothing (ev <= 0)
ev = explained_variance(values, returns)
logger.logkv("serial_timesteps", update*nsteps)
logger.logkv("nupdates", update)
logger.logkv("total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
if eval_env is not None:
logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) )
logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) )
logger.logkv('time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv(lossname, lossval)
if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
logger.dumpkvs()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and (MPI is None or MPI.COMM_WORLD.Get_rank() == 0):
checkdir = osp.join(logger.get_dir(), 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, '%.5i'%update)
print('Saving to', savepath)
model.save(savepath)
return model | [
"def",
"learn",
"(",
"*",
",",
"network",
",",
"env",
",",
"total_timesteps",
",",
"eval_env",
"=",
"None",
",",
"seed",
"=",
"None",
",",
"nsteps",
"=",
"2048",
",",
"ent_coef",
"=",
"0.0",
",",
"lr",
"=",
"3e-4",
",",
"vf_coef",
"=",
"0.5",
",",
"max_grad_norm",
"=",
"0.5",
",",
"gamma",
"=",
"0.99",
",",
"lam",
"=",
"0.95",
",",
"log_interval",
"=",
"10",
",",
"nminibatches",
"=",
"4",
",",
"noptepochs",
"=",
"4",
",",
"cliprange",
"=",
"0.2",
",",
"save_interval",
"=",
"0",
",",
"load_path",
"=",
"None",
",",
"model_fn",
"=",
"None",
",",
"*",
"*",
"network_kwargs",
")",
":",
"set_global_seeds",
"(",
"seed",
")",
"if",
"isinstance",
"(",
"lr",
",",
"float",
")",
":",
"lr",
"=",
"constfn",
"(",
"lr",
")",
"else",
":",
"assert",
"callable",
"(",
"lr",
")",
"if",
"isinstance",
"(",
"cliprange",
",",
"float",
")",
":",
"cliprange",
"=",
"constfn",
"(",
"cliprange",
")",
"else",
":",
"assert",
"callable",
"(",
"cliprange",
")",
"total_timesteps",
"=",
"int",
"(",
"total_timesteps",
")",
"policy",
"=",
"build_policy",
"(",
"env",
",",
"network",
",",
"*",
"*",
"network_kwargs",
")",
"# Get the nb of env",
"nenvs",
"=",
"env",
".",
"num_envs",
"# Get state_space and action_space",
"ob_space",
"=",
"env",
".",
"observation_space",
"ac_space",
"=",
"env",
".",
"action_space",
"# Calculate the batch_size",
"nbatch",
"=",
"nenvs",
"*",
"nsteps",
"nbatch_train",
"=",
"nbatch",
"//",
"nminibatches",
"# Instantiate the model object (that creates act_model and train_model)",
"if",
"model_fn",
"is",
"None",
":",
"from",
"baselines",
".",
"ppo2",
".",
"model",
"import",
"Model",
"model_fn",
"=",
"Model",
"model",
"=",
"model_fn",
"(",
"policy",
"=",
"policy",
",",
"ob_space",
"=",
"ob_space",
",",
"ac_space",
"=",
"ac_space",
",",
"nbatch_act",
"=",
"nenvs",
",",
"nbatch_train",
"=",
"nbatch_train",
",",
"nsteps",
"=",
"nsteps",
",",
"ent_coef",
"=",
"ent_coef",
",",
"vf_coef",
"=",
"vf_coef",
",",
"max_grad_norm",
"=",
"max_grad_norm",
")",
"if",
"load_path",
"is",
"not",
"None",
":",
"model",
".",
"load",
"(",
"load_path",
")",
"# Instantiate the runner object",
"runner",
"=",
"Runner",
"(",
"env",
"=",
"env",
",",
"model",
"=",
"model",
",",
"nsteps",
"=",
"nsteps",
",",
"gamma",
"=",
"gamma",
",",
"lam",
"=",
"lam",
")",
"if",
"eval_env",
"is",
"not",
"None",
":",
"eval_runner",
"=",
"Runner",
"(",
"env",
"=",
"eval_env",
",",
"model",
"=",
"model",
",",
"nsteps",
"=",
"nsteps",
",",
"gamma",
"=",
"gamma",
",",
"lam",
"=",
"lam",
")",
"epinfobuf",
"=",
"deque",
"(",
"maxlen",
"=",
"100",
")",
"if",
"eval_env",
"is",
"not",
"None",
":",
"eval_epinfobuf",
"=",
"deque",
"(",
"maxlen",
"=",
"100",
")",
"# Start total timer",
"tfirststart",
"=",
"time",
".",
"perf_counter",
"(",
")",
"nupdates",
"=",
"total_timesteps",
"//",
"nbatch",
"for",
"update",
"in",
"range",
"(",
"1",
",",
"nupdates",
"+",
"1",
")",
":",
"assert",
"nbatch",
"%",
"nminibatches",
"==",
"0",
"# Start timer",
"tstart",
"=",
"time",
".",
"perf_counter",
"(",
")",
"frac",
"=",
"1.0",
"-",
"(",
"update",
"-",
"1.0",
")",
"/",
"nupdates",
"# Calculate the learning rate",
"lrnow",
"=",
"lr",
"(",
"frac",
")",
"# Calculate the cliprange",
"cliprangenow",
"=",
"cliprange",
"(",
"frac",
")",
"# Get minibatch",
"obs",
",",
"returns",
",",
"masks",
",",
"actions",
",",
"values",
",",
"neglogpacs",
",",
"states",
",",
"epinfos",
"=",
"runner",
".",
"run",
"(",
")",
"#pylint: disable=E0632",
"if",
"eval_env",
"is",
"not",
"None",
":",
"eval_obs",
",",
"eval_returns",
",",
"eval_masks",
",",
"eval_actions",
",",
"eval_values",
",",
"eval_neglogpacs",
",",
"eval_states",
",",
"eval_epinfos",
"=",
"eval_runner",
".",
"run",
"(",
")",
"#pylint: disable=E0632",
"epinfobuf",
".",
"extend",
"(",
"epinfos",
")",
"if",
"eval_env",
"is",
"not",
"None",
":",
"eval_epinfobuf",
".",
"extend",
"(",
"eval_epinfos",
")",
"# Here what we're going to do is for each minibatch calculate the loss and append it.",
"mblossvals",
"=",
"[",
"]",
"if",
"states",
"is",
"None",
":",
"# nonrecurrent version",
"# Index of each element of batch_size",
"# Create the indices array",
"inds",
"=",
"np",
".",
"arange",
"(",
"nbatch",
")",
"for",
"_",
"in",
"range",
"(",
"noptepochs",
")",
":",
"# Randomize the indexes",
"np",
".",
"random",
".",
"shuffle",
"(",
"inds",
")",
"# 0 to batch_size with batch_train_size step",
"for",
"start",
"in",
"range",
"(",
"0",
",",
"nbatch",
",",
"nbatch_train",
")",
":",
"end",
"=",
"start",
"+",
"nbatch_train",
"mbinds",
"=",
"inds",
"[",
"start",
":",
"end",
"]",
"slices",
"=",
"(",
"arr",
"[",
"mbinds",
"]",
"for",
"arr",
"in",
"(",
"obs",
",",
"returns",
",",
"masks",
",",
"actions",
",",
"values",
",",
"neglogpacs",
")",
")",
"mblossvals",
".",
"append",
"(",
"model",
".",
"train",
"(",
"lrnow",
",",
"cliprangenow",
",",
"*",
"slices",
")",
")",
"else",
":",
"# recurrent version",
"assert",
"nenvs",
"%",
"nminibatches",
"==",
"0",
"envsperbatch",
"=",
"nenvs",
"//",
"nminibatches",
"envinds",
"=",
"np",
".",
"arange",
"(",
"nenvs",
")",
"flatinds",
"=",
"np",
".",
"arange",
"(",
"nenvs",
"*",
"nsteps",
")",
".",
"reshape",
"(",
"nenvs",
",",
"nsteps",
")",
"for",
"_",
"in",
"range",
"(",
"noptepochs",
")",
":",
"np",
".",
"random",
".",
"shuffle",
"(",
"envinds",
")",
"for",
"start",
"in",
"range",
"(",
"0",
",",
"nenvs",
",",
"envsperbatch",
")",
":",
"end",
"=",
"start",
"+",
"envsperbatch",
"mbenvinds",
"=",
"envinds",
"[",
"start",
":",
"end",
"]",
"mbflatinds",
"=",
"flatinds",
"[",
"mbenvinds",
"]",
".",
"ravel",
"(",
")",
"slices",
"=",
"(",
"arr",
"[",
"mbflatinds",
"]",
"for",
"arr",
"in",
"(",
"obs",
",",
"returns",
",",
"masks",
",",
"actions",
",",
"values",
",",
"neglogpacs",
")",
")",
"mbstates",
"=",
"states",
"[",
"mbenvinds",
"]",
"mblossvals",
".",
"append",
"(",
"model",
".",
"train",
"(",
"lrnow",
",",
"cliprangenow",
",",
"*",
"slices",
",",
"mbstates",
")",
")",
"# Feedforward --> get losses --> update",
"lossvals",
"=",
"np",
".",
"mean",
"(",
"mblossvals",
",",
"axis",
"=",
"0",
")",
"# End timer",
"tnow",
"=",
"time",
".",
"perf_counter",
"(",
")",
"# Calculate the fps (frame per second)",
"fps",
"=",
"int",
"(",
"nbatch",
"/",
"(",
"tnow",
"-",
"tstart",
")",
")",
"if",
"update",
"%",
"log_interval",
"==",
"0",
"or",
"update",
"==",
"1",
":",
"# Calculates if value function is a good predicator of the returns (ev > 1)",
"# or if it's just worse than predicting nothing (ev =< 0)",
"ev",
"=",
"explained_variance",
"(",
"values",
",",
"returns",
")",
"logger",
".",
"logkv",
"(",
"\"serial_timesteps\"",
",",
"update",
"*",
"nsteps",
")",
"logger",
".",
"logkv",
"(",
"\"nupdates\"",
",",
"update",
")",
"logger",
".",
"logkv",
"(",
"\"total_timesteps\"",
",",
"update",
"*",
"nbatch",
")",
"logger",
".",
"logkv",
"(",
"\"fps\"",
",",
"fps",
")",
"logger",
".",
"logkv",
"(",
"\"explained_variance\"",
",",
"float",
"(",
"ev",
")",
")",
"logger",
".",
"logkv",
"(",
"'eprewmean'",
",",
"safemean",
"(",
"[",
"epinfo",
"[",
"'r'",
"]",
"for",
"epinfo",
"in",
"epinfobuf",
"]",
")",
")",
"logger",
".",
"logkv",
"(",
"'eplenmean'",
",",
"safemean",
"(",
"[",
"epinfo",
"[",
"'l'",
"]",
"for",
"epinfo",
"in",
"epinfobuf",
"]",
")",
")",
"if",
"eval_env",
"is",
"not",
"None",
":",
"logger",
".",
"logkv",
"(",
"'eval_eprewmean'",
",",
"safemean",
"(",
"[",
"epinfo",
"[",
"'r'",
"]",
"for",
"epinfo",
"in",
"eval_epinfobuf",
"]",
")",
")",
"logger",
".",
"logkv",
"(",
"'eval_eplenmean'",
",",
"safemean",
"(",
"[",
"epinfo",
"[",
"'l'",
"]",
"for",
"epinfo",
"in",
"eval_epinfobuf",
"]",
")",
")",
"logger",
".",
"logkv",
"(",
"'time_elapsed'",
",",
"tnow",
"-",
"tfirststart",
")",
"for",
"(",
"lossval",
",",
"lossname",
")",
"in",
"zip",
"(",
"lossvals",
",",
"model",
".",
"loss_names",
")",
":",
"logger",
".",
"logkv",
"(",
"lossname",
",",
"lossval",
")",
"if",
"MPI",
"is",
"None",
"or",
"MPI",
".",
"COMM_WORLD",
".",
"Get_rank",
"(",
")",
"==",
"0",
":",
"logger",
".",
"dumpkvs",
"(",
")",
"if",
"save_interval",
"and",
"(",
"update",
"%",
"save_interval",
"==",
"0",
"or",
"update",
"==",
"1",
")",
"and",
"logger",
".",
"get_dir",
"(",
")",
"and",
"(",
"MPI",
"is",
"None",
"or",
"MPI",
".",
"COMM_WORLD",
".",
"Get_rank",
"(",
")",
"==",
"0",
")",
":",
"checkdir",
"=",
"osp",
".",
"join",
"(",
"logger",
".",
"get_dir",
"(",
")",
",",
"'checkpoints'",
")",
"os",
".",
"makedirs",
"(",
"checkdir",
",",
"exist_ok",
"=",
"True",
")",
"savepath",
"=",
"osp",
".",
"join",
"(",
"checkdir",
",",
"'%.5i'",
"%",
"update",
")",
"print",
"(",
"'Saving to'",
",",
"savepath",
")",
"model",
".",
"save",
"(",
"savepath",
")",
"return",
"model"
] | Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See common/models.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
log_interval: int number of updates between logging events
nminibatches: int number of training minibatches per update. For recurrent policies,
should be smaller than or equal to the number of environments run in parallel.
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int number of updates between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers. | [
"Learn",
"policy",
"using",
"PPO",
"algorithm",
"(",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1707",
".",
"06347",
")"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/ppo2/ppo2.py#L21-L204 | valid |
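A minimal end-to-end sketch of the training loop above on CartPole (hyperparameters are illustrative only; for meaningful eprewmean/eplenmean statistics the environments would additionally be wrapped in baselines.bench.Monitor):

import gym
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.ppo2 import ppo2

venv = DummyVecEnv([lambda: gym.make('CartPole-v0') for _ in range(4)])
model = ppo2.learn(network='mlp', env=venv, total_timesteps=30000,
                   nsteps=128, nminibatches=4, noptepochs=4, lr=3e-4, log_interval=1)

obs = venv.reset()
actions, values, states, neglogps = model.step(obs)   # act with the trained policy
venv.close()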
openai/baselines | baselines/common/cg.py | cg | def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
"""
Demmel p 312
"""
p = b.copy()
r = b.copy()
x = np.zeros_like(b)
rdotr = r.dot(r)
fmtstr = "%10i %10.3g %10.3g"
titlestr = "%10s %10s %10s"
if verbose: print(titlestr % ("iter", "residual norm", "soln norm"))
for i in range(cg_iters):
if callback is not None:
callback(x)
if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x)))
z = f_Ax(p)
v = rdotr / p.dot(z)
x += v*p
r -= v*z
newrdotr = r.dot(r)
mu = newrdotr/rdotr
p = r + mu*p
rdotr = newrdotr
if rdotr < residual_tol:
break
if callback is not None:
callback(x)
if verbose: print(fmtstr % (i+1, rdotr, np.linalg.norm(x))) # pylint: disable=W0631
return x | python | def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
"""
Demmel p 312
"""
p = b.copy()
r = b.copy()
x = np.zeros_like(b)
rdotr = r.dot(r)
fmtstr = "%10i %10.3g %10.3g"
titlestr = "%10s %10s %10s"
if verbose: print(titlestr % ("iter", "residual norm", "soln norm"))
for i in range(cg_iters):
if callback is not None:
callback(x)
if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x)))
z = f_Ax(p)
v = rdotr / p.dot(z)
x += v*p
r -= v*z
newrdotr = r.dot(r)
mu = newrdotr/rdotr
p = r + mu*p
rdotr = newrdotr
if rdotr < residual_tol:
break
if callback is not None:
callback(x)
if verbose: print(fmtstr % (i+1, rdotr, np.linalg.norm(x))) # pylint: disable=W0631
return x | [
"def",
"cg",
"(",
"f_Ax",
",",
"b",
",",
"cg_iters",
"=",
"10",
",",
"callback",
"=",
"None",
",",
"verbose",
"=",
"False",
",",
"residual_tol",
"=",
"1e-10",
")",
":",
"p",
"=",
"b",
".",
"copy",
"(",
")",
"r",
"=",
"b",
".",
"copy",
"(",
")",
"x",
"=",
"np",
".",
"zeros_like",
"(",
"b",
")",
"rdotr",
"=",
"r",
".",
"dot",
"(",
"r",
")",
"fmtstr",
"=",
"\"%10i %10.3g %10.3g\"",
"titlestr",
"=",
"\"%10s %10s %10s\"",
"if",
"verbose",
":",
"print",
"(",
"titlestr",
"%",
"(",
"\"iter\"",
",",
"\"residual norm\"",
",",
"\"soln norm\"",
")",
")",
"for",
"i",
"in",
"range",
"(",
"cg_iters",
")",
":",
"if",
"callback",
"is",
"not",
"None",
":",
"callback",
"(",
"x",
")",
"if",
"verbose",
":",
"print",
"(",
"fmtstr",
"%",
"(",
"i",
",",
"rdotr",
",",
"np",
".",
"linalg",
".",
"norm",
"(",
"x",
")",
")",
")",
"z",
"=",
"f_Ax",
"(",
"p",
")",
"v",
"=",
"rdotr",
"/",
"p",
".",
"dot",
"(",
"z",
")",
"x",
"+=",
"v",
"*",
"p",
"r",
"-=",
"v",
"*",
"z",
"newrdotr",
"=",
"r",
".",
"dot",
"(",
"r",
")",
"mu",
"=",
"newrdotr",
"/",
"rdotr",
"p",
"=",
"r",
"+",
"mu",
"*",
"p",
"rdotr",
"=",
"newrdotr",
"if",
"rdotr",
"<",
"residual_tol",
":",
"break",
"if",
"callback",
"is",
"not",
"None",
":",
"callback",
"(",
"x",
")",
"if",
"verbose",
":",
"print",
"(",
"fmtstr",
"%",
"(",
"i",
"+",
"1",
",",
"rdotr",
",",
"np",
".",
"linalg",
".",
"norm",
"(",
"x",
")",
")",
")",
"# pylint: disable=W0631",
"return",
"x"
] | Demmel p 312 | [
"Demmel",
"p",
"312"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cg.py#L2-L34 | valid |
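A worked example for the conjugate-gradient routine above: solve A x = b for a small symmetric positive-definite A and compare against numpy's direct solver.

import numpy as np
from baselines.common.cg import cg

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])
b = np.array([1.0, 2.0])

# f_Ax only needs to provide matrix-vector products, which is what makes cg
# usable for Fisher-vector products in trpo_mpi.
x = cg(lambda p: A.dot(p), b, cg_iters=10, verbose=True)
assert np.allclose(x, np.linalg.solve(A, b), atol=1e-6)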
openai/baselines | baselines/common/input.py | observation_placeholder | def observation_placeholder(ob_space, batch_size=None, name='Ob'):
'''
Create placeholder to feed observations into of the size appropriate to the observation space
Parameters:
----------
ob_space: gym.Space observation space
batch_size: int size of the batch to be fed into input. Can be left None in most cases.
name: str name of the placeholder
Returns:
-------
tensorflow placeholder tensor
'''
assert isinstance(ob_space, Discrete) or isinstance(ob_space, Box) or isinstance(ob_space, MultiDiscrete), \
'Can only deal with Discrete and Box observation spaces for now'
dtype = ob_space.dtype
if dtype == np.int8:
dtype = np.uint8
return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=dtype, name=name) | python | def observation_placeholder(ob_space, batch_size=None, name='Ob'):
'''
Create placeholder to feed observations into of the size appropriate to the observation space
Parameters:
----------
ob_space: gym.Space observation space
batch_size: int size of the batch to be fed into input. Can be left None in most cases.
name: str name of the placeholder
Returns:
-------
tensorflow placeholder tensor
'''
assert isinstance(ob_space, Discrete) or isinstance(ob_space, Box) or isinstance(ob_space, MultiDiscrete), \
'Can only deal with Discrete and Box observation spaces for now'
dtype = ob_space.dtype
if dtype == np.int8:
dtype = np.uint8
return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=dtype, name=name) | [
"def",
"observation_placeholder",
"(",
"ob_space",
",",
"batch_size",
"=",
"None",
",",
"name",
"=",
"'Ob'",
")",
":",
"assert",
"isinstance",
"(",
"ob_space",
",",
"Discrete",
")",
"or",
"isinstance",
"(",
"ob_space",
",",
"Box",
")",
"or",
"isinstance",
"(",
"ob_space",
",",
"MultiDiscrete",
")",
",",
"'Can only deal with Discrete and Box observation spaces for now'",
"dtype",
"=",
"ob_space",
".",
"dtype",
"if",
"dtype",
"==",
"np",
".",
"int8",
":",
"dtype",
"=",
"np",
".",
"uint8",
"return",
"tf",
".",
"placeholder",
"(",
"shape",
"=",
"(",
"batch_size",
",",
")",
"+",
"ob_space",
".",
"shape",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"name",
")"
] | Create placeholder to feed observations into of the size appropriate to the observation space
Parameters:
----------
ob_space: gym.Space observation space
batch_size: int size of the batch to be fed into input. Can be left None in most cases.
name: str name of the placeholder
Returns:
-------
tensorflow placeholder tensor | [
"Create",
"placeholder",
"to",
"feed",
"observations",
"into",
"of",
"the",
"size",
"appropriate",
"to",
"the",
"observation",
"space"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/input.py#L5-L31 | valid |
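A small sketch of the placeholder helper above (TF1 graph mode, as used throughout baselines at this revision):

import numpy as np
from gym.spaces import Box
from baselines.common.input import observation_placeholder

ob_space = Box(low=-np.inf, high=np.inf, shape=(11,), dtype=np.float32)
ph = observation_placeholder(ob_space, batch_size=None, name='Ob')
print(ph.shape, ph.dtype)   # (?, 11) <dtype: 'float32'> -- the batch dimension is left open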
openai/baselines | baselines/common/input.py | observation_input | def observation_input(ob_space, batch_size=None, name='Ob'):
'''
Create placeholder to feed observations into of the size appropriate to the observation space, and add input
encoder of the appropriate type.
'''
placeholder = observation_placeholder(ob_space, batch_size, name)
return placeholder, encode_observation(ob_space, placeholder) | python | def observation_input(ob_space, batch_size=None, name='Ob'):
'''
Create placeholder to feed observations into of the size appropriate to the observation space, and add input
encoder of the appropriate type.
'''
placeholder = observation_placeholder(ob_space, batch_size, name)
return placeholder, encode_observation(ob_space, placeholder) | [
"def",
"observation_input",
"(",
"ob_space",
",",
"batch_size",
"=",
"None",
",",
"name",
"=",
"'Ob'",
")",
":",
"placeholder",
"=",
"observation_placeholder",
"(",
"ob_space",
",",
"batch_size",
",",
"name",
")",
"return",
"placeholder",
",",
"encode_observation",
"(",
"ob_space",
",",
"placeholder",
")"
] | Create placeholder to feed observations into of the size appropriate to the observation space, and add input
encoder of the appropriate type. | [
"Create",
"placeholder",
"to",
"feed",
"observations",
"into",
"of",
"the",
"size",
"appropriate",
"to",
"the",
"observation",
"space",
"and",
"add",
"input",
"encoder",
"of",
"the",
"appropriate",
"type",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/input.py#L34-L41 | valid |
openai/baselines | baselines/common/input.py | encode_observation | def encode_observation(ob_space, placeholder):
'''
Encode input in the way that is appropriate to the observation space
Parameters:
----------
ob_space: gym.Space observation space
placeholder: tf.placeholder observation input placeholder
'''
if isinstance(ob_space, Discrete):
return tf.to_float(tf.one_hot(placeholder, ob_space.n))
elif isinstance(ob_space, Box):
return tf.to_float(placeholder)
elif isinstance(ob_space, MultiDiscrete):
placeholder = tf.cast(placeholder, tf.int32)
one_hots = [tf.to_float(tf.one_hot(placeholder[..., i], ob_space.nvec[i])) for i in range(placeholder.shape[-1])]
return tf.concat(one_hots, axis=-1)
else:
raise NotImplementedError | python | def encode_observation(ob_space, placeholder):
'''
Encode input in the way that is appropriate to the observation space
Parameters:
----------
ob_space: gym.Space observation space
placeholder: tf.placeholder observation input placeholder
'''
if isinstance(ob_space, Discrete):
return tf.to_float(tf.one_hot(placeholder, ob_space.n))
elif isinstance(ob_space, Box):
return tf.to_float(placeholder)
elif isinstance(ob_space, MultiDiscrete):
placeholder = tf.cast(placeholder, tf.int32)
one_hots = [tf.to_float(tf.one_hot(placeholder[..., i], ob_space.nvec[i])) for i in range(placeholder.shape[-1])]
return tf.concat(one_hots, axis=-1)
else:
raise NotImplementedError | [
"def",
"encode_observation",
"(",
"ob_space",
",",
"placeholder",
")",
":",
"if",
"isinstance",
"(",
"ob_space",
",",
"Discrete",
")",
":",
"return",
"tf",
".",
"to_float",
"(",
"tf",
".",
"one_hot",
"(",
"placeholder",
",",
"ob_space",
".",
"n",
")",
")",
"elif",
"isinstance",
"(",
"ob_space",
",",
"Box",
")",
":",
"return",
"tf",
".",
"to_float",
"(",
"placeholder",
")",
"elif",
"isinstance",
"(",
"ob_space",
",",
"MultiDiscrete",
")",
":",
"placeholder",
"=",
"tf",
".",
"cast",
"(",
"placeholder",
",",
"tf",
".",
"int32",
")",
"one_hots",
"=",
"[",
"tf",
".",
"to_float",
"(",
"tf",
".",
"one_hot",
"(",
"placeholder",
"[",
"...",
",",
"i",
"]",
",",
"ob_space",
".",
"nvec",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"placeholder",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"]",
"return",
"tf",
".",
"concat",
"(",
"one_hots",
",",
"axis",
"=",
"-",
"1",
")",
"else",
":",
"raise",
"NotImplementedError"
] | Encode input in the way that is appropriate to the observation space
Parameters:
----------
ob_space: gym.Space observation space
placeholder: tf.placeholder observation input placeholder | [
"Encode",
"input",
"in",
"the",
"way",
"that",
"is",
"appropriate",
"to",
"the",
"observation",
"space"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/input.py#L43-L63 | valid |
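A short sketch tying the two helpers above together: for a Discrete space, observation_input returns the raw integer placeholder plus its one-hot encoding (TF1 graph mode assumed):

import numpy as np
import tensorflow as tf
from gym.spaces import Discrete
from baselines.common.input import observation_input

ph, encoded = observation_input(Discrete(5), batch_size=None)
with tf.Session() as sess:
    print(sess.run(encoded, feed_dict={ph: np.array([0, 3])}))
# [[1. 0. 0. 0. 0.]
#  [0. 0. 0. 1. 0.]]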
openai/baselines | baselines/her/rollout.py | RolloutWorker.generate_rollouts | def generate_rollouts(self):
"""Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current
policy acting on it accordingly.
"""
self.reset_all_rollouts()
# compute observations
o = np.empty((self.rollout_batch_size, self.dims['o']), np.float32) # observations
ag = np.empty((self.rollout_batch_size, self.dims['g']), np.float32) # achieved goals
o[:] = self.initial_o
ag[:] = self.initial_ag
# generate episodes
obs, achieved_goals, acts, goals, successes = [], [], [], [], []
dones = []
info_values = [np.empty((self.T - 1, self.rollout_batch_size, self.dims['info_' + key]), np.float32) for key in self.info_keys]
Qs = []
for t in range(self.T):
policy_output = self.policy.get_actions(
o, ag, self.g,
compute_Q=self.compute_Q,
noise_eps=self.noise_eps if not self.exploit else 0.,
random_eps=self.random_eps if not self.exploit else 0.,
use_target_net=self.use_target_net)
if self.compute_Q:
u, Q = policy_output
Qs.append(Q)
else:
u = policy_output
if u.ndim == 1:
# The non-batched case should still have a reasonable shape.
u = u.reshape(1, -1)
o_new = np.empty((self.rollout_batch_size, self.dims['o']))
ag_new = np.empty((self.rollout_batch_size, self.dims['g']))
success = np.zeros(self.rollout_batch_size)
# compute new states and observations
obs_dict_new, _, done, info = self.venv.step(u)
o_new = obs_dict_new['observation']
ag_new = obs_dict_new['achieved_goal']
success = np.array([i.get('is_success', 0.0) for i in info])
if any(done):
# here we assume all environments are done in ~the same number of steps, so we terminate rollouts whenever any of the envs returns done
# the trick with using vecenvs is not to add the obs from the environments that are "done", because those are already observations
# after a reset
break
for i, info_dict in enumerate(info):
for idx, key in enumerate(self.info_keys):
info_values[idx][t, i] = info[i][key]
if np.isnan(o_new).any():
self.logger.warn('NaN caught during rollout generation. Trying again...')
self.reset_all_rollouts()
return self.generate_rollouts()
dones.append(done)
obs.append(o.copy())
achieved_goals.append(ag.copy())
successes.append(success.copy())
acts.append(u.copy())
goals.append(self.g.copy())
o[...] = o_new
ag[...] = ag_new
obs.append(o.copy())
achieved_goals.append(ag.copy())
episode = dict(o=obs,
u=acts,
g=goals,
ag=achieved_goals)
for key, value in zip(self.info_keys, info_values):
episode['info_{}'.format(key)] = value
# stats
successful = np.array(successes)[-1, :]
assert successful.shape == (self.rollout_batch_size,)
success_rate = np.mean(successful)
self.success_history.append(success_rate)
if self.compute_Q:
self.Q_history.append(np.mean(Qs))
self.n_episodes += self.rollout_batch_size
return convert_episode_to_batch_major(episode) | python | def generate_rollouts(self):
"""Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current
policy acting on it accordingly.
"""
self.reset_all_rollouts()
# compute observations
o = np.empty((self.rollout_batch_size, self.dims['o']), np.float32) # observations
ag = np.empty((self.rollout_batch_size, self.dims['g']), np.float32) # achieved goals
o[:] = self.initial_o
ag[:] = self.initial_ag
# generate episodes
obs, achieved_goals, acts, goals, successes = [], [], [], [], []
dones = []
info_values = [np.empty((self.T - 1, self.rollout_batch_size, self.dims['info_' + key]), np.float32) for key in self.info_keys]
Qs = []
for t in range(self.T):
policy_output = self.policy.get_actions(
o, ag, self.g,
compute_Q=self.compute_Q,
noise_eps=self.noise_eps if not self.exploit else 0.,
random_eps=self.random_eps if not self.exploit else 0.,
use_target_net=self.use_target_net)
if self.compute_Q:
u, Q = policy_output
Qs.append(Q)
else:
u = policy_output
if u.ndim == 1:
# The non-batched case should still have a reasonable shape.
u = u.reshape(1, -1)
o_new = np.empty((self.rollout_batch_size, self.dims['o']))
ag_new = np.empty((self.rollout_batch_size, self.dims['g']))
success = np.zeros(self.rollout_batch_size)
# compute new states and observations
obs_dict_new, _, done, info = self.venv.step(u)
o_new = obs_dict_new['observation']
ag_new = obs_dict_new['achieved_goal']
success = np.array([i.get('is_success', 0.0) for i in info])
if any(done):
# here we assume all environments are done in ~the same number of steps, so we terminate rollouts whenever any of the envs returns done
# the trick with using vecenvs is not to add the obs from the environments that are "done", because those are already observations
# after a reset
break
for i, info_dict in enumerate(info):
for idx, key in enumerate(self.info_keys):
info_values[idx][t, i] = info[i][key]
if np.isnan(o_new).any():
self.logger.warn('NaN caught during rollout generation. Trying again...')
self.reset_all_rollouts()
return self.generate_rollouts()
dones.append(done)
obs.append(o.copy())
achieved_goals.append(ag.copy())
successes.append(success.copy())
acts.append(u.copy())
goals.append(self.g.copy())
o[...] = o_new
ag[...] = ag_new
obs.append(o.copy())
achieved_goals.append(ag.copy())
episode = dict(o=obs,
u=acts,
g=goals,
ag=achieved_goals)
for key, value in zip(self.info_keys, info_values):
episode['info_{}'.format(key)] = value
# stats
successful = np.array(successes)[-1, :]
assert successful.shape == (self.rollout_batch_size,)
success_rate = np.mean(successful)
self.success_history.append(success_rate)
if self.compute_Q:
self.Q_history.append(np.mean(Qs))
self.n_episodes += self.rollout_batch_size
return convert_episode_to_batch_major(episode) | [
"def",
"generate_rollouts",
"(",
"self",
")",
":",
"self",
".",
"reset_all_rollouts",
"(",
")",
"# compute observations",
"o",
"=",
"np",
".",
"empty",
"(",
"(",
"self",
".",
"rollout_batch_size",
",",
"self",
".",
"dims",
"[",
"'o'",
"]",
")",
",",
"np",
".",
"float32",
")",
"# observations",
"ag",
"=",
"np",
".",
"empty",
"(",
"(",
"self",
".",
"rollout_batch_size",
",",
"self",
".",
"dims",
"[",
"'g'",
"]",
")",
",",
"np",
".",
"float32",
")",
"# achieved goals",
"o",
"[",
":",
"]",
"=",
"self",
".",
"initial_o",
"ag",
"[",
":",
"]",
"=",
"self",
".",
"initial_ag",
"# generate episodes",
"obs",
",",
"achieved_goals",
",",
"acts",
",",
"goals",
",",
"successes",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"dones",
"=",
"[",
"]",
"info_values",
"=",
"[",
"np",
".",
"empty",
"(",
"(",
"self",
".",
"T",
"-",
"1",
",",
"self",
".",
"rollout_batch_size",
",",
"self",
".",
"dims",
"[",
"'info_'",
"+",
"key",
"]",
")",
",",
"np",
".",
"float32",
")",
"for",
"key",
"in",
"self",
".",
"info_keys",
"]",
"Qs",
"=",
"[",
"]",
"for",
"t",
"in",
"range",
"(",
"self",
".",
"T",
")",
":",
"policy_output",
"=",
"self",
".",
"policy",
".",
"get_actions",
"(",
"o",
",",
"ag",
",",
"self",
".",
"g",
",",
"compute_Q",
"=",
"self",
".",
"compute_Q",
",",
"noise_eps",
"=",
"self",
".",
"noise_eps",
"if",
"not",
"self",
".",
"exploit",
"else",
"0.",
",",
"random_eps",
"=",
"self",
".",
"random_eps",
"if",
"not",
"self",
".",
"exploit",
"else",
"0.",
",",
"use_target_net",
"=",
"self",
".",
"use_target_net",
")",
"if",
"self",
".",
"compute_Q",
":",
"u",
",",
"Q",
"=",
"policy_output",
"Qs",
".",
"append",
"(",
"Q",
")",
"else",
":",
"u",
"=",
"policy_output",
"if",
"u",
".",
"ndim",
"==",
"1",
":",
"# The non-batched case should still have a reasonable shape.",
"u",
"=",
"u",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
"o_new",
"=",
"np",
".",
"empty",
"(",
"(",
"self",
".",
"rollout_batch_size",
",",
"self",
".",
"dims",
"[",
"'o'",
"]",
")",
")",
"ag_new",
"=",
"np",
".",
"empty",
"(",
"(",
"self",
".",
"rollout_batch_size",
",",
"self",
".",
"dims",
"[",
"'g'",
"]",
")",
")",
"success",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"rollout_batch_size",
")",
"# compute new states and observations",
"obs_dict_new",
",",
"_",
",",
"done",
",",
"info",
"=",
"self",
".",
"venv",
".",
"step",
"(",
"u",
")",
"o_new",
"=",
"obs_dict_new",
"[",
"'observation'",
"]",
"ag_new",
"=",
"obs_dict_new",
"[",
"'achieved_goal'",
"]",
"success",
"=",
"np",
".",
"array",
"(",
"[",
"i",
".",
"get",
"(",
"'is_success'",
",",
"0.0",
")",
"for",
"i",
"in",
"info",
"]",
")",
"if",
"any",
"(",
"done",
")",
":",
"# here we assume all environments are done is ~same number of steps, so we terminate rollouts whenever any of the envs returns done",
"# trick with using vecenvs is not to add the obs from the environments that are \"done\", because those are already observations",
"# after a reset",
"break",
"for",
"i",
",",
"info_dict",
"in",
"enumerate",
"(",
"info",
")",
":",
"for",
"idx",
",",
"key",
"in",
"enumerate",
"(",
"self",
".",
"info_keys",
")",
":",
"info_values",
"[",
"idx",
"]",
"[",
"t",
",",
"i",
"]",
"=",
"info",
"[",
"i",
"]",
"[",
"key",
"]",
"if",
"np",
".",
"isnan",
"(",
"o_new",
")",
".",
"any",
"(",
")",
":",
"self",
".",
"logger",
".",
"warn",
"(",
"'NaN caught during rollout generation. Trying again...'",
")",
"self",
".",
"reset_all_rollouts",
"(",
")",
"return",
"self",
".",
"generate_rollouts",
"(",
")",
"dones",
".",
"append",
"(",
"done",
")",
"obs",
".",
"append",
"(",
"o",
".",
"copy",
"(",
")",
")",
"achieved_goals",
".",
"append",
"(",
"ag",
".",
"copy",
"(",
")",
")",
"successes",
".",
"append",
"(",
"success",
".",
"copy",
"(",
")",
")",
"acts",
".",
"append",
"(",
"u",
".",
"copy",
"(",
")",
")",
"goals",
".",
"append",
"(",
"self",
".",
"g",
".",
"copy",
"(",
")",
")",
"o",
"[",
"...",
"]",
"=",
"o_new",
"ag",
"[",
"...",
"]",
"=",
"ag_new",
"obs",
".",
"append",
"(",
"o",
".",
"copy",
"(",
")",
")",
"achieved_goals",
".",
"append",
"(",
"ag",
".",
"copy",
"(",
")",
")",
"episode",
"=",
"dict",
"(",
"o",
"=",
"obs",
",",
"u",
"=",
"acts",
",",
"g",
"=",
"goals",
",",
"ag",
"=",
"achieved_goals",
")",
"for",
"key",
",",
"value",
"in",
"zip",
"(",
"self",
".",
"info_keys",
",",
"info_values",
")",
":",
"episode",
"[",
"'info_{}'",
".",
"format",
"(",
"key",
")",
"]",
"=",
"value",
"# stats",
"successful",
"=",
"np",
".",
"array",
"(",
"successes",
")",
"[",
"-",
"1",
",",
":",
"]",
"assert",
"successful",
".",
"shape",
"==",
"(",
"self",
".",
"rollout_batch_size",
",",
")",
"success_rate",
"=",
"np",
".",
"mean",
"(",
"successful",
")",
"self",
".",
"success_history",
".",
"append",
"(",
"success_rate",
")",
"if",
"self",
".",
"compute_Q",
":",
"self",
".",
"Q_history",
".",
"append",
"(",
"np",
".",
"mean",
"(",
"Qs",
")",
")",
"self",
".",
"n_episodes",
"+=",
"self",
".",
"rollout_batch_size",
"return",
"convert_episode_to_batch_major",
"(",
"episode",
")"
] | Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current
policy acting on it accordingly. | [
"Performs",
"rollout_batch_size",
"rollouts",
"in",
"parallel",
"for",
"time",
"horizon",
"T",
"with",
"the",
"current",
"policy",
"acting",
"on",
"it",
"accordingly",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/rollout.py#L51-L137 | valid |
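A hedged usage sketch for the rollout generator above. Constructing the worker is omitted; `worker` is assumed to be a configured RolloutWorker, `replay_buffer` an assumed HER-style buffer exposing store_episode, and `n_epochs` an illustrative hyperparameter.

from baselines.her.rollout import RolloutWorker  # class providing generate_rollouts()

n_epochs = 10                                    # illustrative value
for epoch in range(n_epochs):
    worker.clear_history()                       # reset success/Q statistics (method assumed on the same worker class)
    episode = worker.generate_rollouts()         # batch-major dict with keys o, u, g, ag, info_*
    replay_buffer.store_episode(episode)         # assumed replay-buffer API
    for key, val in worker.logs(prefix='train'):
        print(key, val)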
openai/baselines | baselines/her/rollout.py | RolloutWorker.save_policy | def save_policy(self, path):
"""Pickles the current policy for later inspection.
"""
with open(path, 'wb') as f:
pickle.dump(self.policy, f) | python | def save_policy(self, path):
"""Pickles the current policy for later inspection.
"""
with open(path, 'wb') as f:
pickle.dump(self.policy, f) | [
"def",
"save_policy",
"(",
"self",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"self",
".",
"policy",
",",
"f",
")"
] | Pickles the current policy for later inspection. | [
"Pickles",
"the",
"current",
"policy",
"for",
"later",
"inspection",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/rollout.py#L151-L155 | valid |
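A minimal sketch of saving and reloading a policy with the save_policy method above; `worker` is an assumed RolloutWorker instance and the path is hypothetical.

import pickle

worker.save_policy('/tmp/her_policy.pkl')        # writes a pickle of worker.policy

with open('/tmp/her_policy.pkl', 'rb') as f:     # reload for later inspection
    policy = pickle.load(f)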
openai/baselines | baselines/her/rollout.py | RolloutWorker.logs | def logs(self, prefix='worker'):
"""Generates a dictionary that contains all collected statistics.
"""
logs = []
logs += [('success_rate', np.mean(self.success_history))]
if self.compute_Q:
logs += [('mean_Q', np.mean(self.Q_history))]
logs += [('episode', self.n_episodes)]
if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs | python | def logs(self, prefix='worker'):
"""Generates a dictionary that contains all collected statistics.
"""
logs = []
logs += [('success_rate', np.mean(self.success_history))]
if self.compute_Q:
logs += [('mean_Q', np.mean(self.Q_history))]
logs += [('episode', self.n_episodes)]
if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs | [
"def",
"logs",
"(",
"self",
",",
"prefix",
"=",
"'worker'",
")",
":",
"logs",
"=",
"[",
"]",
"logs",
"+=",
"[",
"(",
"'success_rate'",
",",
"np",
".",
"mean",
"(",
"self",
".",
"success_history",
")",
")",
"]",
"if",
"self",
".",
"compute_Q",
":",
"logs",
"+=",
"[",
"(",
"'mean_Q'",
",",
"np",
".",
"mean",
"(",
"self",
".",
"Q_history",
")",
")",
"]",
"logs",
"+=",
"[",
"(",
"'episode'",
",",
"self",
".",
"n_episodes",
")",
"]",
"if",
"prefix",
"!=",
"''",
"and",
"not",
"prefix",
".",
"endswith",
"(",
"'/'",
")",
":",
"return",
"[",
"(",
"prefix",
"+",
"'/'",
"+",
"key",
",",
"val",
")",
"for",
"key",
",",
"val",
"in",
"logs",
"]",
"else",
":",
"return",
"logs"
] | Generates a dictionary that contains all collected statistics. | [
"Generates",
"a",
"dictionary",
"that",
"contains",
"all",
"collected",
"statistics",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/rollout.py#L157-L169 | valid |
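A short sketch of the key-prefixing behaviour of logs(): each statistic comes back as a (key, value) pair, and keys are prepended with '<prefix>/' unless the prefix is empty or already ends in '/'. `worker` is an assumed RolloutWorker; mean_Q appears only when compute_Q is enabled.

for key, val in worker.logs(prefix='test'):
    # e.g. ('test/success_rate', ...), ('test/mean_Q', ...), ('test/episode', ...)
    print('{}: {}'.format(key, val))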
openai/baselines | baselines/common/plot_util.py | smooth | def smooth(y, radius, mode='two_sided', valid_only=False):
'''
    Smooth signal y, where radius determines the size of the window
    mode='two_sided':
average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]
mode='causal':
average over the window [max(index - radius, 0), index]
valid_only: put nan in entries where the full-sized window is not available
'''
assert mode in ('two_sided', 'causal')
if len(y) < 2*radius+1:
return np.ones_like(y) * y.mean()
elif mode == 'two_sided':
convkernel = np.ones(2 * radius+1)
out = np.convolve(y, convkernel,mode='same') / np.convolve(np.ones_like(y), convkernel, mode='same')
if valid_only:
out[:radius] = out[-radius:] = np.nan
elif mode == 'causal':
convkernel = np.ones(radius)
out = np.convolve(y, convkernel,mode='full') / np.convolve(np.ones_like(y), convkernel, mode='full')
out = out[:-radius+1]
if valid_only:
out[:radius] = np.nan
return out | python | def smooth(y, radius, mode='two_sided', valid_only=False):
'''
    Smooth signal y, where radius determines the size of the window
    mode='two_sided':
average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]
mode='causal':
average over the window [max(index - radius, 0), index]
valid_only: put nan in entries where the full-sized window is not available
'''
assert mode in ('two_sided', 'causal')
if len(y) < 2*radius+1:
return np.ones_like(y) * y.mean()
elif mode == 'two_sided':
convkernel = np.ones(2 * radius+1)
out = np.convolve(y, convkernel,mode='same') / np.convolve(np.ones_like(y), convkernel, mode='same')
if valid_only:
out[:radius] = out[-radius:] = np.nan
elif mode == 'causal':
convkernel = np.ones(radius)
out = np.convolve(y, convkernel,mode='full') / np.convolve(np.ones_like(y), convkernel, mode='full')
out = out[:-radius+1]
if valid_only:
out[:radius] = np.nan
return out | [
"def",
"smooth",
"(",
"y",
",",
"radius",
",",
"mode",
"=",
"'two_sided'",
",",
"valid_only",
"=",
"False",
")",
":",
"assert",
"mode",
"in",
"(",
"'two_sided'",
",",
"'causal'",
")",
"if",
"len",
"(",
"y",
")",
"<",
"2",
"*",
"radius",
"+",
"1",
":",
"return",
"np",
".",
"ones_like",
"(",
"y",
")",
"*",
"y",
".",
"mean",
"(",
")",
"elif",
"mode",
"==",
"'two_sided'",
":",
"convkernel",
"=",
"np",
".",
"ones",
"(",
"2",
"*",
"radius",
"+",
"1",
")",
"out",
"=",
"np",
".",
"convolve",
"(",
"y",
",",
"convkernel",
",",
"mode",
"=",
"'same'",
")",
"/",
"np",
".",
"convolve",
"(",
"np",
".",
"ones_like",
"(",
"y",
")",
",",
"convkernel",
",",
"mode",
"=",
"'same'",
")",
"if",
"valid_only",
":",
"out",
"[",
":",
"radius",
"]",
"=",
"out",
"[",
"-",
"radius",
":",
"]",
"=",
"np",
".",
"nan",
"elif",
"mode",
"==",
"'causal'",
":",
"convkernel",
"=",
"np",
".",
"ones",
"(",
"radius",
")",
"out",
"=",
"np",
".",
"convolve",
"(",
"y",
",",
"convkernel",
",",
"mode",
"=",
"'full'",
")",
"/",
"np",
".",
"convolve",
"(",
"np",
".",
"ones_like",
"(",
"y",
")",
",",
"convkernel",
",",
"mode",
"=",
"'full'",
")",
"out",
"=",
"out",
"[",
":",
"-",
"radius",
"+",
"1",
"]",
"if",
"valid_only",
":",
"out",
"[",
":",
"radius",
"]",
"=",
"np",
".",
"nan",
"return",
"out"
] | Smooth signal y, where radius determines the size of the window
mode='two_sided':
average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]
mode='causal':
average over the window [max(index - radius, 0), index]
valid_only: put nan in entries where the full-sized window is not available | [
"Smooth",
"signal",
"y",
"where",
"radius",
"is",
"determines",
"the",
"size",
"of",
"the",
"window"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L11-L37 | valid |
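An illustrative call to smooth() above (array values are made up). With mode='two_sided' each output entry is the mean of y over a window of the given radius around the index, computed via a convolution with a kernel of ones normalized by the window size; inputs shorter than 2*radius+1 simply return the mean.

import numpy as np
from baselines.common.plot_util import smooth

y = np.array([0., 1., 2., 3., 4., 5., 6.])
y_two_sided = smooth(y, radius=2, mode='two_sided')             # centered moving average
y_causal = smooth(y, radius=2, mode='causal', valid_only=True)  # trailing average, NaN where the window is incomplete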
openai/baselines | baselines/common/plot_util.py | one_sided_ema | def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
    yolds: array or list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple xs, ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
low = xolds[0] if low is None else low
high = xolds[-1] if high is None else high
assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(low, xolds[0])
assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(high, xolds[-1])
assert len(xolds) == len(yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds))
xolds = xolds.astype('float64')
yolds = yolds.astype('float64')
luoi = 0 # last unused old index
sum_y = 0.
count_y = 0.
xnews = np.linspace(low, high, n)
decay_period = (high - low) / (n - 1) * decay_steps
interstep_decay = np.exp(- 1. / decay_steps)
sum_ys = np.zeros_like(xnews)
count_ys = np.zeros_like(xnews)
for i in range(n):
xnew = xnews[i]
sum_y *= interstep_decay
count_y *= interstep_decay
while True:
xold = xolds[luoi]
if xold <= xnew:
decay = np.exp(- (xnew - xold) / decay_period)
sum_y += decay * yolds[luoi]
count_y += decay
luoi += 1
else:
break
if luoi >= len(xolds):
break
sum_ys[i] = sum_y
count_ys[i] = count_y
ys = sum_ys / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xnews, ys, count_ys | python | def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
    yolds: array or list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple xs, ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
low = xolds[0] if low is None else low
high = xolds[-1] if high is None else high
assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(low, xolds[0])
assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(high, xolds[-1])
assert len(xolds) == len(yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds))
xolds = xolds.astype('float64')
yolds = yolds.astype('float64')
luoi = 0 # last unused old index
sum_y = 0.
count_y = 0.
xnews = np.linspace(low, high, n)
decay_period = (high - low) / (n - 1) * decay_steps
interstep_decay = np.exp(- 1. / decay_steps)
sum_ys = np.zeros_like(xnews)
count_ys = np.zeros_like(xnews)
for i in range(n):
xnew = xnews[i]
sum_y *= interstep_decay
count_y *= interstep_decay
while True:
xold = xolds[luoi]
if xold <= xnew:
decay = np.exp(- (xnew - xold) / decay_period)
sum_y += decay * yolds[luoi]
count_y += decay
luoi += 1
else:
break
if luoi >= len(xolds):
break
sum_ys[i] = sum_y
count_ys[i] = count_y
ys = sum_ys / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xnews, ys, count_ys | [
"def",
"one_sided_ema",
"(",
"xolds",
",",
"yolds",
",",
"low",
"=",
"None",
",",
"high",
"=",
"None",
",",
"n",
"=",
"512",
",",
"decay_steps",
"=",
"1.",
",",
"low_counts_threshold",
"=",
"1e-8",
")",
":",
"low",
"=",
"xolds",
"[",
"0",
"]",
"if",
"low",
"is",
"None",
"else",
"low",
"high",
"=",
"xolds",
"[",
"-",
"1",
"]",
"if",
"high",
"is",
"None",
"else",
"high",
"assert",
"xolds",
"[",
"0",
"]",
"<=",
"low",
",",
"'low = {} < xolds[0] = {} - extrapolation not permitted!'",
".",
"format",
"(",
"low",
",",
"xolds",
"[",
"0",
"]",
")",
"assert",
"xolds",
"[",
"-",
"1",
"]",
">=",
"high",
",",
"'high = {} > xolds[-1] = {} - extrapolation not permitted!'",
".",
"format",
"(",
"high",
",",
"xolds",
"[",
"-",
"1",
"]",
")",
"assert",
"len",
"(",
"xolds",
")",
"==",
"len",
"(",
"yolds",
")",
",",
"'length of xolds ({}) and yolds ({}) do not match!'",
".",
"format",
"(",
"len",
"(",
"xolds",
")",
",",
"len",
"(",
"yolds",
")",
")",
"xolds",
"=",
"xolds",
".",
"astype",
"(",
"'float64'",
")",
"yolds",
"=",
"yolds",
".",
"astype",
"(",
"'float64'",
")",
"luoi",
"=",
"0",
"# last unused old index",
"sum_y",
"=",
"0.",
"count_y",
"=",
"0.",
"xnews",
"=",
"np",
".",
"linspace",
"(",
"low",
",",
"high",
",",
"n",
")",
"decay_period",
"=",
"(",
"high",
"-",
"low",
")",
"/",
"(",
"n",
"-",
"1",
")",
"*",
"decay_steps",
"interstep_decay",
"=",
"np",
".",
"exp",
"(",
"-",
"1.",
"/",
"decay_steps",
")",
"sum_ys",
"=",
"np",
".",
"zeros_like",
"(",
"xnews",
")",
"count_ys",
"=",
"np",
".",
"zeros_like",
"(",
"xnews",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"xnew",
"=",
"xnews",
"[",
"i",
"]",
"sum_y",
"*=",
"interstep_decay",
"count_y",
"*=",
"interstep_decay",
"while",
"True",
":",
"xold",
"=",
"xolds",
"[",
"luoi",
"]",
"if",
"xold",
"<=",
"xnew",
":",
"decay",
"=",
"np",
".",
"exp",
"(",
"-",
"(",
"xnew",
"-",
"xold",
")",
"/",
"decay_period",
")",
"sum_y",
"+=",
"decay",
"*",
"yolds",
"[",
"luoi",
"]",
"count_y",
"+=",
"decay",
"luoi",
"+=",
"1",
"else",
":",
"break",
"if",
"luoi",
">=",
"len",
"(",
"xolds",
")",
":",
"break",
"sum_ys",
"[",
"i",
"]",
"=",
"sum_y",
"count_ys",
"[",
"i",
"]",
"=",
"count_y",
"ys",
"=",
"sum_ys",
"/",
"count_ys",
"ys",
"[",
"count_ys",
"<",
"low_counts_threshold",
"]",
"=",
"np",
".",
"nan",
"return",
"xnews",
",",
"ys",
",",
"count_ys"
] | perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
    yolds: array or list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple xs, ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid | [
"perform",
"one",
"-",
"sided",
"(",
"causal",
")",
"EMA",
"(",
"exponential",
"moving",
"average",
")",
"smoothing",
"and",
"resampling",
"to",
"an",
"even",
"grid",
"with",
"n",
"points",
".",
"Does",
"not",
"do",
"extrapolation",
"so",
"we",
"assume",
"xolds",
"[",
"0",
"]",
"<",
"=",
"low",
"&&",
"high",
"<",
"=",
"xolds",
"[",
"-",
"1",
"]"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L39-L109 | valid |
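An illustrative call to one_sided_ema() above, resampling unevenly spaced points onto a 5-point grid (values are made up). The returned ys is a causal EMA of yolds evaluated on the new grid, and entries whose accumulated count falls below low_counts_threshold come back as NaN.

import numpy as np
from baselines.common.plot_util import one_sided_ema

xolds = np.array([0., 1., 3., 6., 10.])
yolds = np.array([1., 2., 0., 4., 3.])
xs, ys, counts = one_sided_ema(xolds, yolds, xolds[0], xolds[-1], n=5, decay_steps=1.)
# xs == np.linspace(0, 10, 5); ys holds the causal EMA at those grid points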
openai/baselines | baselines/common/plot_util.py | symmetric_ema | def symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform symmetric EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
    yolds: array or list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple xs, ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
xs, ys1, count_ys1 = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold=0)
_, ys2, count_ys2 = one_sided_ema(-xolds[::-1], yolds[::-1], -high, -low, n, decay_steps, low_counts_threshold=0)
ys2 = ys2[::-1]
count_ys2 = count_ys2[::-1]
count_ys = count_ys1 + count_ys2
ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xs, ys, count_ys | python | def symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform symmetric EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
yolds: array of list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple xs, ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
xs, ys1, count_ys1 = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold=0)
_, ys2, count_ys2 = one_sided_ema(-xolds[::-1], yolds[::-1], -high, -low, n, decay_steps, low_counts_threshold=0)
ys2 = ys2[::-1]
count_ys2 = count_ys2[::-1]
count_ys = count_ys1 + count_ys2
ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xs, ys, count_ys | [
"def",
"symmetric_ema",
"(",
"xolds",
",",
"yolds",
",",
"low",
"=",
"None",
",",
"high",
"=",
"None",
",",
"n",
"=",
"512",
",",
"decay_steps",
"=",
"1.",
",",
"low_counts_threshold",
"=",
"1e-8",
")",
":",
"xs",
",",
"ys1",
",",
"count_ys1",
"=",
"one_sided_ema",
"(",
"xolds",
",",
"yolds",
",",
"low",
",",
"high",
",",
"n",
",",
"decay_steps",
",",
"low_counts_threshold",
"=",
"0",
")",
"_",
",",
"ys2",
",",
"count_ys2",
"=",
"one_sided_ema",
"(",
"-",
"xolds",
"[",
":",
":",
"-",
"1",
"]",
",",
"yolds",
"[",
":",
":",
"-",
"1",
"]",
",",
"-",
"high",
",",
"-",
"low",
",",
"n",
",",
"decay_steps",
",",
"low_counts_threshold",
"=",
"0",
")",
"ys2",
"=",
"ys2",
"[",
":",
":",
"-",
"1",
"]",
"count_ys2",
"=",
"count_ys2",
"[",
":",
":",
"-",
"1",
"]",
"count_ys",
"=",
"count_ys1",
"+",
"count_ys2",
"ys",
"=",
"(",
"ys1",
"*",
"count_ys1",
"+",
"ys2",
"*",
"count_ys2",
")",
"/",
"count_ys",
"ys",
"[",
"count_ys",
"<",
"low_counts_threshold",
"]",
"=",
"np",
".",
"nan",
"return",
"xs",
",",
"ys",
",",
"count_ys"
] | perform symmetric EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
    yolds: array or list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple xs, ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid | [
"perform",
"symmetric",
"EMA",
"(",
"exponential",
"moving",
"average",
")",
"smoothing",
"and",
"resampling",
"to",
"an",
"even",
"grid",
"with",
"n",
"points",
".",
"Does",
"not",
"do",
"extrapolation",
"so",
"we",
"assume",
"xolds",
"[",
"0",
"]",
"<",
"=",
"low",
"&&",
"high",
"<",
"=",
"xolds",
"[",
"-",
"1",
"]"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L111-L147 | valid |
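The same example with symmetric_ema() above: it runs one_sided_ema forward and backward over the data and returns the count-weighted average of the two passes, so the smoothing does not lag in either direction (values are again made up; low and high are passed explicitly).

import numpy as np
from baselines.common.plot_util import symmetric_ema

xolds = np.array([0., 1., 3., 6., 10.])
yolds = np.array([1., 2., 0., 4., 3.])
xs, ys, counts = symmetric_ema(xolds, yolds, xolds[0], xolds[-1], n=5, decay_steps=1.)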
openai/baselines | baselines/common/plot_util.py | load_results | def load_results(root_dir_or_dirs, enable_progress=True, enable_monitor=True, verbose=False):
'''
load summaries of runs from a list of directories (including subdirectories)
Arguments:
enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True
enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True
verbose: bool - if True, will print out list of directories from which the data is loaded. Default: False
Returns:
List of Result objects with the following fields:
- dirname - path to the directory data was loaded from
        - metadata - run metadata (such as command-line arguments and anything else in metadata.json file)
- monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory)
- progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file
'''
import re
if isinstance(root_dir_or_dirs, str):
rootdirs = [osp.expanduser(root_dir_or_dirs)]
else:
rootdirs = [osp.expanduser(d) for d in root_dir_or_dirs]
allresults = []
for rootdir in rootdirs:
assert osp.exists(rootdir), "%s doesn't exist"%rootdir
for dirname, dirs, files in os.walk(rootdir):
if '-proc' in dirname:
files[:] = []
continue
monitor_re = re.compile(r'(\d+\.)?(\d+\.)?monitor\.csv')
if set(['metadata.json', 'monitor.json', 'progress.json', 'progress.csv']).intersection(files) or \
any([f for f in files if monitor_re.match(f)]): # also match monitor files like 0.1.monitor.csv
# used to be uncommented, which means do not go deeper than current directory if any of the data files
# are found
# dirs[:] = []
result = {'dirname' : dirname}
if "metadata.json" in files:
with open(osp.join(dirname, "metadata.json"), "r") as fh:
result['metadata'] = json.load(fh)
progjson = osp.join(dirname, "progress.json")
progcsv = osp.join(dirname, "progress.csv")
if enable_progress:
if osp.exists(progjson):
result['progress'] = pandas.DataFrame(read_json(progjson))
elif osp.exists(progcsv):
try:
result['progress'] = read_csv(progcsv)
except pandas.errors.EmptyDataError:
print('skipping progress file in ', dirname, 'empty data')
else:
if verbose: print('skipping %s: no progress file'%dirname)
if enable_monitor:
try:
result['monitor'] = pandas.DataFrame(monitor.load_results(dirname))
except monitor.LoadMonitorResultsError:
print('skipping %s: no monitor files'%dirname)
except Exception as e:
print('exception loading monitor file in %s: %s'%(dirname, e))
if result.get('monitor') is not None or result.get('progress') is not None:
allresults.append(Result(**result))
if verbose:
print('successfully loaded %s'%dirname)
if verbose: print('loaded %i results'%len(allresults))
return allresults | python | def load_results(root_dir_or_dirs, enable_progress=True, enable_monitor=True, verbose=False):
'''
load summaries of runs from a list of directories (including subdirectories)
Arguments:
enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True
enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True
verbose: bool - if True, will print out list of directories from which the data is loaded. Default: False
Returns:
List of Result objects with the following fields:
- dirname - path to the directory data was loaded from
        - metadata - run metadata (such as command-line arguments and anything else in metadata.json file)
- monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory)
- progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file
'''
import re
if isinstance(root_dir_or_dirs, str):
rootdirs = [osp.expanduser(root_dir_or_dirs)]
else:
rootdirs = [osp.expanduser(d) for d in root_dir_or_dirs]
allresults = []
for rootdir in rootdirs:
assert osp.exists(rootdir), "%s doesn't exist"%rootdir
for dirname, dirs, files in os.walk(rootdir):
if '-proc' in dirname:
files[:] = []
continue
monitor_re = re.compile(r'(\d+\.)?(\d+\.)?monitor\.csv')
if set(['metadata.json', 'monitor.json', 'progress.json', 'progress.csv']).intersection(files) or \
any([f for f in files if monitor_re.match(f)]): # also match monitor files like 0.1.monitor.csv
# used to be uncommented, which means do not go deeper than current directory if any of the data files
# are found
# dirs[:] = []
result = {'dirname' : dirname}
if "metadata.json" in files:
with open(osp.join(dirname, "metadata.json"), "r") as fh:
result['metadata'] = json.load(fh)
progjson = osp.join(dirname, "progress.json")
progcsv = osp.join(dirname, "progress.csv")
if enable_progress:
if osp.exists(progjson):
result['progress'] = pandas.DataFrame(read_json(progjson))
elif osp.exists(progcsv):
try:
result['progress'] = read_csv(progcsv)
except pandas.errors.EmptyDataError:
print('skipping progress file in ', dirname, 'empty data')
else:
if verbose: print('skipping %s: no progress file'%dirname)
if enable_monitor:
try:
result['monitor'] = pandas.DataFrame(monitor.load_results(dirname))
except monitor.LoadMonitorResultsError:
print('skipping %s: no monitor files'%dirname)
except Exception as e:
print('exception loading monitor file in %s: %s'%(dirname, e))
if result.get('monitor') is not None or result.get('progress') is not None:
allresults.append(Result(**result))
if verbose:
print('successfully loaded %s'%dirname)
if verbose: print('loaded %i results'%len(allresults))
return allresults | [
"def",
"load_results",
"(",
"root_dir_or_dirs",
",",
"enable_progress",
"=",
"True",
",",
"enable_monitor",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
":",
"import",
"re",
"if",
"isinstance",
"(",
"root_dir_or_dirs",
",",
"str",
")",
":",
"rootdirs",
"=",
"[",
"osp",
".",
"expanduser",
"(",
"root_dir_or_dirs",
")",
"]",
"else",
":",
"rootdirs",
"=",
"[",
"osp",
".",
"expanduser",
"(",
"d",
")",
"for",
"d",
"in",
"root_dir_or_dirs",
"]",
"allresults",
"=",
"[",
"]",
"for",
"rootdir",
"in",
"rootdirs",
":",
"assert",
"osp",
".",
"exists",
"(",
"rootdir",
")",
",",
"\"%s doesn't exist\"",
"%",
"rootdir",
"for",
"dirname",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"rootdir",
")",
":",
"if",
"'-proc'",
"in",
"dirname",
":",
"files",
"[",
":",
"]",
"=",
"[",
"]",
"continue",
"monitor_re",
"=",
"re",
".",
"compile",
"(",
"r'(\\d+\\.)?(\\d+\\.)?monitor\\.csv'",
")",
"if",
"set",
"(",
"[",
"'metadata.json'",
",",
"'monitor.json'",
",",
"'progress.json'",
",",
"'progress.csv'",
"]",
")",
".",
"intersection",
"(",
"files",
")",
"or",
"any",
"(",
"[",
"f",
"for",
"f",
"in",
"files",
"if",
"monitor_re",
".",
"match",
"(",
"f",
")",
"]",
")",
":",
"# also match monitor files like 0.1.monitor.csv",
"# used to be uncommented, which means do not go deeper than current directory if any of the data files",
"# are found",
"# dirs[:] = []",
"result",
"=",
"{",
"'dirname'",
":",
"dirname",
"}",
"if",
"\"metadata.json\"",
"in",
"files",
":",
"with",
"open",
"(",
"osp",
".",
"join",
"(",
"dirname",
",",
"\"metadata.json\"",
")",
",",
"\"r\"",
")",
"as",
"fh",
":",
"result",
"[",
"'metadata'",
"]",
"=",
"json",
".",
"load",
"(",
"fh",
")",
"progjson",
"=",
"osp",
".",
"join",
"(",
"dirname",
",",
"\"progress.json\"",
")",
"progcsv",
"=",
"osp",
".",
"join",
"(",
"dirname",
",",
"\"progress.csv\"",
")",
"if",
"enable_progress",
":",
"if",
"osp",
".",
"exists",
"(",
"progjson",
")",
":",
"result",
"[",
"'progress'",
"]",
"=",
"pandas",
".",
"DataFrame",
"(",
"read_json",
"(",
"progjson",
")",
")",
"elif",
"osp",
".",
"exists",
"(",
"progcsv",
")",
":",
"try",
":",
"result",
"[",
"'progress'",
"]",
"=",
"read_csv",
"(",
"progcsv",
")",
"except",
"pandas",
".",
"errors",
".",
"EmptyDataError",
":",
"print",
"(",
"'skipping progress file in '",
",",
"dirname",
",",
"'empty data'",
")",
"else",
":",
"if",
"verbose",
":",
"print",
"(",
"'skipping %s: no progress file'",
"%",
"dirname",
")",
"if",
"enable_monitor",
":",
"try",
":",
"result",
"[",
"'monitor'",
"]",
"=",
"pandas",
".",
"DataFrame",
"(",
"monitor",
".",
"load_results",
"(",
"dirname",
")",
")",
"except",
"monitor",
".",
"LoadMonitorResultsError",
":",
"print",
"(",
"'skipping %s: no monitor files'",
"%",
"dirname",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'exception loading monitor file in %s: %s'",
"%",
"(",
"dirname",
",",
"e",
")",
")",
"if",
"result",
".",
"get",
"(",
"'monitor'",
")",
"is",
"not",
"None",
"or",
"result",
".",
"get",
"(",
"'progress'",
")",
"is",
"not",
"None",
":",
"allresults",
".",
"append",
"(",
"Result",
"(",
"*",
"*",
"result",
")",
")",
"if",
"verbose",
":",
"print",
"(",
"'successfully loaded %s'",
"%",
"dirname",
")",
"if",
"verbose",
":",
"print",
"(",
"'loaded %i results'",
"%",
"len",
"(",
"allresults",
")",
")",
"return",
"allresults"
] | load summaries of runs from a list of directories (including subdirectories)
Arguments:
enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True
enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True
verbose: bool - if True, will print out list of directories from which the data is loaded. Default: False
Returns:
List of Result objects with the following fields:
- dirname - path to the directory data was loaded from
        - metadata - run metadata (such as command-line arguments and anything else in metadata.json file)
- monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory)
- progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file | [
"load",
"summaries",
"of",
"runs",
"from",
"a",
"list",
"of",
"directories",
"(",
"including",
"subdirectories",
")",
"Arguments",
":"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L152-L220 | valid |
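A hedged usage sketch for load_results() above; the log directory is hypothetical. Each returned Result exposes dirname, metadata, monitor and progress, where monitor and progress are pandas DataFrames when the corresponding files were found (and are assumed to be None otherwise).

from baselines.common import plot_util as pu

results = pu.load_results('~/logs/my_experiment', verbose=True)  # path is illustrative
r = results[0]
print(r.dirname)                      # directory the data came from
if r.monitor is not None:
    print(r.monitor.head())           # per-episode reward, length and time from the Monitor wrapper
if r.progress is not None:
    print(list(r.progress.columns))   # keys written by the logger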
openai/baselines | baselines/common/plot_util.py | plot_results | def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=default_split_fn,
group_fn=default_split_fn,
average_group=False,
shaded_std=True,
shaded_err=True,
figsize=None,
legend_outside=False,
resample=0,
smooth_step=1.0
):
'''
Plot multiple Results objects
xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values.
By default, x is cumsum of episode lengths, and y is episode rewards
split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.
That is, the results r for which split_fn(r) is different will be put on different sub-panels.
By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are
stacked vertically in the figure.
group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.
That is, the results r for which group_fn(r) is the same will be put into the same group.
Curves in the same group have the same color (if average_group is False), or averaged over
(if average_group is True). The default value is the same as default value for split_fn
average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling
(if resample = 0, will use 512 steps)
shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be
shown (only applicable if average_group = True)
shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves
(that is, standard deviation divided by square root of number of curves) will be
shown (only applicable if average_group = True)
figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of
sub-panels.
legend_outside: bool - if True, will place the legend outside of the sub-panels.
resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric
EMA smoothing (see the docstring for symmetric_ema).
Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default
value is 512.
smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).
See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.
'''
if split_fn is None: split_fn = lambda _ : ''
if group_fn is None: group_fn = lambda _ : ''
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
assert len(sk2r) > 0
assert isinstance(resample, int), "0: don't resample. <integer>: that many samples"
nrows = len(sk2r)
ncols = 1
figsize = figsize or (6, 6 * nrows)
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
for (isplit, sk) in enumerate(sorted(sk2r.keys())):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
ax = axarr[isplit][0]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for group in sorted(groups):
xys = gresults[group]
if not any(xys):
continue
color = COLORS[groups.index(group) % len(COLORS)]
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]),\
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
l, = axarr[isplit][0].plot(usex, ymean, color=color)
g2l[group] = l
if shaded_err:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
# https://matplotlib.org/users/legend_guide.html
plt.tight_layout()
if any(g2l.keys()):
ax.legend(
g2l.values(),
['%s (%i)'%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(),
loc=2 if legend_outside else None,
bbox_to_anchor=(1,1) if legend_outside else None)
ax.set_title(sk)
return f, axarr | python | def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=default_split_fn,
group_fn=default_split_fn,
average_group=False,
shaded_std=True,
shaded_err=True,
figsize=None,
legend_outside=False,
resample=0,
smooth_step=1.0
):
'''
Plot multiple Results objects
xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values.
By default, x is cumsum of episode lengths, and y is episode rewards
split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.
That is, the results r for which split_fn(r) is different will be put on different sub-panels.
By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are
stacked vertically in the figure.
group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.
That is, the results r for which group_fn(r) is the same will be put into the same group.
Curves in the same group have the same color (if average_group is False), or averaged over
(if average_group is True). The default value is the same as default value for split_fn
average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling
(if resample = 0, will use 512 steps)
shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be
shown (only applicable if average_group = True)
shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves
(that is, standard deviation divided by square root of number of curves) will be
shown (only applicable if average_group = True)
figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of
sub-panels.
legend_outside: bool - if True, will place the legend outside of the sub-panels.
resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric
EMA smoothing (see the docstring for symmetric_ema).
Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default
value is 512.
smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).
See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.
'''
if split_fn is None: split_fn = lambda _ : ''
if group_fn is None: group_fn = lambda _ : ''
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
assert len(sk2r) > 0
assert isinstance(resample, int), "0: don't resample. <integer>: that many samples"
nrows = len(sk2r)
ncols = 1
figsize = figsize or (6, 6 * nrows)
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
for (isplit, sk) in enumerate(sorted(sk2r.keys())):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
ax = axarr[isplit][0]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for group in sorted(groups):
xys = gresults[group]
if not any(xys):
continue
color = COLORS[groups.index(group) % len(COLORS)]
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]),\
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
l, = axarr[isplit][0].plot(usex, ymean, color=color)
g2l[group] = l
if shaded_err:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
# https://matplotlib.org/users/legend_guide.html
plt.tight_layout()
if any(g2l.keys()):
ax.legend(
g2l.values(),
['%s (%i)'%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(),
loc=2 if legend_outside else None,
bbox_to_anchor=(1,1) if legend_outside else None)
ax.set_title(sk)
return f, axarr | [
"def",
"plot_results",
"(",
"allresults",
",",
"*",
",",
"xy_fn",
"=",
"default_xy_fn",
",",
"split_fn",
"=",
"default_split_fn",
",",
"group_fn",
"=",
"default_split_fn",
",",
"average_group",
"=",
"False",
",",
"shaded_std",
"=",
"True",
",",
"shaded_err",
"=",
"True",
",",
"figsize",
"=",
"None",
",",
"legend_outside",
"=",
"False",
",",
"resample",
"=",
"0",
",",
"smooth_step",
"=",
"1.0",
")",
":",
"if",
"split_fn",
"is",
"None",
":",
"split_fn",
"=",
"lambda",
"_",
":",
"''",
"if",
"group_fn",
"is",
"None",
":",
"group_fn",
"=",
"lambda",
"_",
":",
"''",
"sk2r",
"=",
"defaultdict",
"(",
"list",
")",
"# splitkey2results",
"for",
"result",
"in",
"allresults",
":",
"splitkey",
"=",
"split_fn",
"(",
"result",
")",
"sk2r",
"[",
"splitkey",
"]",
".",
"append",
"(",
"result",
")",
"assert",
"len",
"(",
"sk2r",
")",
">",
"0",
"assert",
"isinstance",
"(",
"resample",
",",
"int",
")",
",",
"\"0: don't resample. <integer>: that many samples\"",
"nrows",
"=",
"len",
"(",
"sk2r",
")",
"ncols",
"=",
"1",
"figsize",
"=",
"figsize",
"or",
"(",
"6",
",",
"6",
"*",
"nrows",
")",
"f",
",",
"axarr",
"=",
"plt",
".",
"subplots",
"(",
"nrows",
",",
"ncols",
",",
"sharex",
"=",
"False",
",",
"squeeze",
"=",
"False",
",",
"figsize",
"=",
"figsize",
")",
"groups",
"=",
"list",
"(",
"set",
"(",
"group_fn",
"(",
"result",
")",
"for",
"result",
"in",
"allresults",
")",
")",
"default_samples",
"=",
"512",
"if",
"average_group",
":",
"resample",
"=",
"resample",
"or",
"default_samples",
"for",
"(",
"isplit",
",",
"sk",
")",
"in",
"enumerate",
"(",
"sorted",
"(",
"sk2r",
".",
"keys",
"(",
")",
")",
")",
":",
"g2l",
"=",
"{",
"}",
"g2c",
"=",
"defaultdict",
"(",
"int",
")",
"sresults",
"=",
"sk2r",
"[",
"sk",
"]",
"gresults",
"=",
"defaultdict",
"(",
"list",
")",
"ax",
"=",
"axarr",
"[",
"isplit",
"]",
"[",
"0",
"]",
"for",
"result",
"in",
"sresults",
":",
"group",
"=",
"group_fn",
"(",
"result",
")",
"g2c",
"[",
"group",
"]",
"+=",
"1",
"x",
",",
"y",
"=",
"xy_fn",
"(",
"result",
")",
"if",
"x",
"is",
"None",
":",
"x",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"y",
")",
")",
"x",
",",
"y",
"=",
"map",
"(",
"np",
".",
"asarray",
",",
"(",
"x",
",",
"y",
")",
")",
"if",
"average_group",
":",
"gresults",
"[",
"group",
"]",
".",
"append",
"(",
"(",
"x",
",",
"y",
")",
")",
"else",
":",
"if",
"resample",
":",
"x",
",",
"y",
",",
"counts",
"=",
"symmetric_ema",
"(",
"x",
",",
"y",
",",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"-",
"1",
"]",
",",
"resample",
",",
"decay_steps",
"=",
"smooth_step",
")",
"l",
",",
"=",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"color",
"=",
"COLORS",
"[",
"groups",
".",
"index",
"(",
"group",
")",
"%",
"len",
"(",
"COLORS",
")",
"]",
")",
"g2l",
"[",
"group",
"]",
"=",
"l",
"if",
"average_group",
":",
"for",
"group",
"in",
"sorted",
"(",
"groups",
")",
":",
"xys",
"=",
"gresults",
"[",
"group",
"]",
"if",
"not",
"any",
"(",
"xys",
")",
":",
"continue",
"color",
"=",
"COLORS",
"[",
"groups",
".",
"index",
"(",
"group",
")",
"%",
"len",
"(",
"COLORS",
")",
"]",
"origxs",
"=",
"[",
"xy",
"[",
"0",
"]",
"for",
"xy",
"in",
"xys",
"]",
"minxlen",
"=",
"min",
"(",
"map",
"(",
"len",
",",
"origxs",
")",
")",
"def",
"allequal",
"(",
"qs",
")",
":",
"return",
"all",
"(",
"(",
"q",
"==",
"qs",
"[",
"0",
"]",
")",
".",
"all",
"(",
")",
"for",
"q",
"in",
"qs",
"[",
"1",
":",
"]",
")",
"if",
"resample",
":",
"low",
"=",
"max",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"origxs",
")",
"high",
"=",
"min",
"(",
"x",
"[",
"-",
"1",
"]",
"for",
"x",
"in",
"origxs",
")",
"usex",
"=",
"np",
".",
"linspace",
"(",
"low",
",",
"high",
",",
"resample",
")",
"ys",
"=",
"[",
"]",
"for",
"(",
"x",
",",
"y",
")",
"in",
"xys",
":",
"ys",
".",
"append",
"(",
"symmetric_ema",
"(",
"x",
",",
"y",
",",
"low",
",",
"high",
",",
"resample",
",",
"decay_steps",
"=",
"smooth_step",
")",
"[",
"1",
"]",
")",
"else",
":",
"assert",
"allequal",
"(",
"[",
"x",
"[",
":",
"minxlen",
"]",
"for",
"x",
"in",
"origxs",
"]",
")",
",",
"'If you want to average unevenly sampled data, set resample=<number of samples you want>'",
"usex",
"=",
"origxs",
"[",
"0",
"]",
"ys",
"=",
"[",
"xy",
"[",
"1",
"]",
"[",
":",
"minxlen",
"]",
"for",
"xy",
"in",
"xys",
"]",
"ymean",
"=",
"np",
".",
"mean",
"(",
"ys",
",",
"axis",
"=",
"0",
")",
"ystd",
"=",
"np",
".",
"std",
"(",
"ys",
",",
"axis",
"=",
"0",
")",
"ystderr",
"=",
"ystd",
"/",
"np",
".",
"sqrt",
"(",
"len",
"(",
"ys",
")",
")",
"l",
",",
"=",
"axarr",
"[",
"isplit",
"]",
"[",
"0",
"]",
".",
"plot",
"(",
"usex",
",",
"ymean",
",",
"color",
"=",
"color",
")",
"g2l",
"[",
"group",
"]",
"=",
"l",
"if",
"shaded_err",
":",
"ax",
".",
"fill_between",
"(",
"usex",
",",
"ymean",
"-",
"ystderr",
",",
"ymean",
"+",
"ystderr",
",",
"color",
"=",
"color",
",",
"alpha",
"=",
".4",
")",
"if",
"shaded_std",
":",
"ax",
".",
"fill_between",
"(",
"usex",
",",
"ymean",
"-",
"ystd",
",",
"ymean",
"+",
"ystd",
",",
"color",
"=",
"color",
",",
"alpha",
"=",
".2",
")",
"# https://matplotlib.org/users/legend_guide.html",
"plt",
".",
"tight_layout",
"(",
")",
"if",
"any",
"(",
"g2l",
".",
"keys",
"(",
")",
")",
":",
"ax",
".",
"legend",
"(",
"g2l",
".",
"values",
"(",
")",
",",
"[",
"'%s (%i)'",
"%",
"(",
"g",
",",
"g2c",
"[",
"g",
"]",
")",
"for",
"g",
"in",
"g2l",
"]",
"if",
"average_group",
"else",
"g2l",
".",
"keys",
"(",
")",
",",
"loc",
"=",
"2",
"if",
"legend_outside",
"else",
"None",
",",
"bbox_to_anchor",
"=",
"(",
"1",
",",
"1",
")",
"if",
"legend_outside",
"else",
"None",
")",
"ax",
".",
"set_title",
"(",
"sk",
")",
"return",
"f",
",",
"axarr"
] | Plot multiple Results objects
xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values.
By default, x is cumsum of episode lengths, and y is episode rewards
split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.
That is, the results r for which split_fn(r) is different will be put on different sub-panels.
By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are
stacked vertically in the figure.
group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.
That is, the results r for which group_fn(r) is the same will be put into the same group.
Curves in the same group have the same color (if average_group is False), or averaged over
(if average_group is True). The default value is the same as default value for split_fn
average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling
(if resample = 0, will use 512 steps)
shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be
shown (only applicable if average_group = True)
shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves
(that is, standard deviation divided by square root of number of curves) will be
shown (only applicable if average_group = True)
figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of
sub-panels.
legend_outside: bool - if True, will place the legend outside of the sub-panels.
resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric
EMA smoothing (see the docstring for symmetric_ema).
Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default
value is 512.
smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).
See docstrings for decay_steps in symmetric_ema or one_sided_ema functions. | [
"Plot",
"multiple",
"Results",
"objects"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L240-L375 | valid |
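A hedged sketch of plotting the loaded results with plot_results() above. The default xy_fn plots episode rewards against the cumulative sum of episode lengths from the monitor data; here curves sharing a directory prefix are grouped and averaged (the grouping lambda and the path are illustrative).

import matplotlib.pyplot as plt
from baselines.common import plot_util as pu

results = pu.load_results('~/logs/my_experiment')          # path is illustrative
fig, axarr = pu.plot_results(
    results,
    average_group=True,                                     # average curves within a group (resample defaults to 512)
    shaded_std=True, shaded_err=True,
    group_fn=lambda r: r.dirname.split('-')[0],             # illustrative grouping key
)
plt.show()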
openai/baselines | baselines/common/mpi_adam_optimizer.py | check_synced | def check_synced(localval, comm=None):
"""
    It's common to forget to initialize your variables to the same values, or
    (less commonly) to get them out of sync by updating them in some way other than adam.
This function checks that variables on all MPI workers are the same, and raises
an AssertionError otherwise
Arguments:
comm: MPI communicator
localval: list of local variables (list of variables on current worker to be compared with the other workers)
"""
comm = comm or MPI.COMM_WORLD
vals = comm.gather(localval)
if comm.rank == 0:
assert all(val==vals[0] for val in vals[1:]) | python | def check_synced(localval, comm=None):
"""
    It's common to forget to initialize your variables to the same values, or
    (less commonly) to get them out of sync by updating them in some way other than adam.
This function checks that variables on all MPI workers are the same, and raises
an AssertionError otherwise
Arguments:
comm: MPI communicator
localval: list of local variables (list of variables on current worker to be compared with the other workers)
"""
comm = comm or MPI.COMM_WORLD
vals = comm.gather(localval)
if comm.rank == 0:
assert all(val==vals[0] for val in vals[1:]) | [
"def",
"check_synced",
"(",
"localval",
",",
"comm",
"=",
"None",
")",
":",
"comm",
"=",
"comm",
"or",
"MPI",
".",
"COMM_WORLD",
"vals",
"=",
"comm",
".",
"gather",
"(",
"localval",
")",
"if",
"comm",
".",
"rank",
"==",
"0",
":",
"assert",
"all",
"(",
"val",
"==",
"vals",
"[",
"0",
"]",
"for",
"val",
"in",
"vals",
"[",
"1",
":",
"]",
")"
] | It's common to forget to initialize your variables to the same values, or
    (less commonly) to get them out of sync by updating them in some way other than adam.
This function checks that variables on all MPI workers are the same, and raises
an AssertionError otherwise
Arguments:
comm: MPI communicator
localval: list of local variables (list of variables on current worker to be compared with the other workers) | [
"It",
"s",
"common",
"to",
"forget",
"to",
"initialize",
"your",
"variables",
"to",
"the",
"same",
"values",
"or",
"(",
"less",
"commonly",
")",
"if",
"you",
"update",
"them",
"in",
"some",
"other",
"way",
"than",
"adam",
"to",
"get",
"them",
"out",
"of",
"sync",
".",
"This",
"function",
"checks",
"that",
"variables",
"on",
"all",
"MPI",
"workers",
"are",
"the",
"same",
"and",
"raises",
"an",
"AssertionError",
"otherwise"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/mpi_adam_optimizer.py#L40-L54 | valid |
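A minimal sketch of check_synced() above: each MPI worker computes a cheap summary of its local parameters and rank 0 asserts that every worker reported the same value. The list `param_arrays` is an assumed collection of numpy parameter arrays.

from mpi4py import MPI
from baselines.common.mpi_adam_optimizer import check_synced

local_checksum = float(sum(p.sum() for p in param_arrays))   # param_arrays is assumed to exist
check_synced(local_checksum, comm=MPI.COMM_WORLD)            # AssertionError on rank 0 if workers diverged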
openai/baselines | baselines/common/vec_env/util.py | copy_obs_dict | def copy_obs_dict(obs):
"""
Deep-copy an observation dict.
"""
return {k: np.copy(v) for k, v in obs.items()} | python | def copy_obs_dict(obs):
"""
Deep-copy an observation dict.
"""
return {k: np.copy(v) for k, v in obs.items()} | [
"def",
"copy_obs_dict",
"(",
"obs",
")",
":",
"return",
"{",
"k",
":",
"np",
".",
"copy",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"obs",
".",
"items",
"(",
")",
"}"
] | Deep-copy an observation dict. | [
"Deep",
"-",
"copy",
"an",
"observation",
"dict",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/vec_env/util.py#L11-L15 | valid |
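A short example of copy_obs_dict() above: the returned dict holds independent numpy copies, so mutating the copy leaves the original observation untouched.

import numpy as np
from baselines.common.vec_env.util import copy_obs_dict

obs = {'observation': np.zeros(3), 'desired_goal': np.ones(2)}
obs_copy = copy_obs_dict(obs)
obs_copy['observation'][0] = 5.0
assert obs['observation'][0] == 0.0   # original is unchanged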
openai/baselines | baselines/common/vec_env/util.py | obs_space_info | def obs_space_info(obs_space):
"""
Get dict-structured information about a gym.Space.
Returns:
A tuple (keys, shapes, dtypes):
keys: a list of dict keys.
shapes: a dict mapping keys to shapes.
dtypes: a dict mapping keys to dtypes.
"""
if isinstance(obs_space, gym.spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
subspaces = obs_space.spaces
else:
subspaces = {None: obs_space}
keys = []
shapes = {}
dtypes = {}
for key, box in subspaces.items():
keys.append(key)
shapes[key] = box.shape
dtypes[key] = box.dtype
return keys, shapes, dtypes | python | def obs_space_info(obs_space):
"""
Get dict-structured information about a gym.Space.
Returns:
A tuple (keys, shapes, dtypes):
keys: a list of dict keys.
shapes: a dict mapping keys to shapes.
dtypes: a dict mapping keys to dtypes.
"""
if isinstance(obs_space, gym.spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
subspaces = obs_space.spaces
else:
subspaces = {None: obs_space}
keys = []
shapes = {}
dtypes = {}
for key, box in subspaces.items():
keys.append(key)
shapes[key] = box.shape
dtypes[key] = box.dtype
return keys, shapes, dtypes | [
"def",
"obs_space_info",
"(",
"obs_space",
")",
":",
"if",
"isinstance",
"(",
"obs_space",
",",
"gym",
".",
"spaces",
".",
"Dict",
")",
":",
"assert",
"isinstance",
"(",
"obs_space",
".",
"spaces",
",",
"OrderedDict",
")",
"subspaces",
"=",
"obs_space",
".",
"spaces",
"else",
":",
"subspaces",
"=",
"{",
"None",
":",
"obs_space",
"}",
"keys",
"=",
"[",
"]",
"shapes",
"=",
"{",
"}",
"dtypes",
"=",
"{",
"}",
"for",
"key",
",",
"box",
"in",
"subspaces",
".",
"items",
"(",
")",
":",
"keys",
".",
"append",
"(",
"key",
")",
"shapes",
"[",
"key",
"]",
"=",
"box",
".",
"shape",
"dtypes",
"[",
"key",
"]",
"=",
"box",
".",
"dtype",
"return",
"keys",
",",
"shapes",
",",
"dtypes"
] | Get dict-structured information about a gym.Space.
Returns:
A tuple (keys, shapes, dtypes):
keys: a list of dict keys.
shapes: a dict mapping keys to shapes.
dtypes: a dict mapping keys to dtypes. | [
"Get",
"dict",
"-",
"structured",
"information",
"about",
"a",
"gym",
".",
"Space",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/vec_env/util.py#L28-L50 | valid |
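A short sketch of obs_space_info on a Dict observation space; the space definition below is made up for illustration.
import numpy as np
import gym
from collections import OrderedDict
space = gym.spaces.Dict(OrderedDict([
    ('image', gym.spaces.Box(low=0, high=255, shape=(84, 84, 3), dtype=np.uint8)),
    ('vector', gym.spaces.Box(low=-1.0, high=1.0, shape=(5,), dtype=np.float32)),
]))
keys, shapes, dtypes = obs_space_info(space)
# keys   -> ['image', 'vector']
# shapes -> {'image': (84, 84, 3), 'vector': (5,)}
# dtypes -> {'image': uint8, 'vector': float32}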
openai/baselines | baselines/acer/acer.py | q_retrace | def q_retrace(R, D, q_i, v, rho_i, nenvs, nsteps, gamma):
"""
Calculates q_retrace targets
:param R: Rewards
:param D: Dones
:param q_i: Q values for actions taken
:param v: V values
:param rho_i: Importance weight for each action
:return: Q_retrace values
"""
rho_bar = batch_to_seq(tf.minimum(1.0, rho_i), nenvs, nsteps, True) # list of len steps, shape [nenvs]
rs = batch_to_seq(R, nenvs, nsteps, True) # list of len steps, shape [nenvs]
ds = batch_to_seq(D, nenvs, nsteps, True) # list of len steps, shape [nenvs]
q_is = batch_to_seq(q_i, nenvs, nsteps, True)
vs = batch_to_seq(v, nenvs, nsteps + 1, True)
v_final = vs[-1]
qret = v_final
qrets = []
for i in range(nsteps - 1, -1, -1):
check_shape([qret, ds[i], rs[i], rho_bar[i], q_is[i], vs[i]], [[nenvs]] * 6)
qret = rs[i] + gamma * qret * (1.0 - ds[i])
qrets.append(qret)
qret = (rho_bar[i] * (qret - q_is[i])) + vs[i]
qrets = qrets[::-1]
qret = seq_to_batch(qrets, flat=True)
return qret | python | def q_retrace(R, D, q_i, v, rho_i, nenvs, nsteps, gamma):
"""
Calculates q_retrace targets
:param R: Rewards
:param D: Dones
:param q_i: Q values for actions taken
:param v: V values
:param rho_i: Importance weight for each action
:return: Q_retrace values
"""
rho_bar = batch_to_seq(tf.minimum(1.0, rho_i), nenvs, nsteps, True) # list of len steps, shape [nenvs]
rs = batch_to_seq(R, nenvs, nsteps, True) # list of len steps, shape [nenvs]
ds = batch_to_seq(D, nenvs, nsteps, True) # list of len steps, shape [nenvs]
q_is = batch_to_seq(q_i, nenvs, nsteps, True)
vs = batch_to_seq(v, nenvs, nsteps + 1, True)
v_final = vs[-1]
qret = v_final
qrets = []
for i in range(nsteps - 1, -1, -1):
check_shape([qret, ds[i], rs[i], rho_bar[i], q_is[i], vs[i]], [[nenvs]] * 6)
qret = rs[i] + gamma * qret * (1.0 - ds[i])
qrets.append(qret)
qret = (rho_bar[i] * (qret - q_is[i])) + vs[i]
qrets = qrets[::-1]
qret = seq_to_batch(qrets, flat=True)
return qret | [
"def",
"q_retrace",
"(",
"R",
",",
"D",
",",
"q_i",
",",
"v",
",",
"rho_i",
",",
"nenvs",
",",
"nsteps",
",",
"gamma",
")",
":",
"rho_bar",
"=",
"batch_to_seq",
"(",
"tf",
".",
"minimum",
"(",
"1.0",
",",
"rho_i",
")",
",",
"nenvs",
",",
"nsteps",
",",
"True",
")",
"# list of len steps, shape [nenvs]",
"rs",
"=",
"batch_to_seq",
"(",
"R",
",",
"nenvs",
",",
"nsteps",
",",
"True",
")",
"# list of len steps, shape [nenvs]",
"ds",
"=",
"batch_to_seq",
"(",
"D",
",",
"nenvs",
",",
"nsteps",
",",
"True",
")",
"# list of len steps, shape [nenvs]",
"q_is",
"=",
"batch_to_seq",
"(",
"q_i",
",",
"nenvs",
",",
"nsteps",
",",
"True",
")",
"vs",
"=",
"batch_to_seq",
"(",
"v",
",",
"nenvs",
",",
"nsteps",
"+",
"1",
",",
"True",
")",
"v_final",
"=",
"vs",
"[",
"-",
"1",
"]",
"qret",
"=",
"v_final",
"qrets",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"nsteps",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"check_shape",
"(",
"[",
"qret",
",",
"ds",
"[",
"i",
"]",
",",
"rs",
"[",
"i",
"]",
",",
"rho_bar",
"[",
"i",
"]",
",",
"q_is",
"[",
"i",
"]",
",",
"vs",
"[",
"i",
"]",
"]",
",",
"[",
"[",
"nenvs",
"]",
"]",
"*",
"6",
")",
"qret",
"=",
"rs",
"[",
"i",
"]",
"+",
"gamma",
"*",
"qret",
"*",
"(",
"1.0",
"-",
"ds",
"[",
"i",
"]",
")",
"qrets",
".",
"append",
"(",
"qret",
")",
"qret",
"=",
"(",
"rho_bar",
"[",
"i",
"]",
"*",
"(",
"qret",
"-",
"q_is",
"[",
"i",
"]",
")",
")",
"+",
"vs",
"[",
"i",
"]",
"qrets",
"=",
"qrets",
"[",
":",
":",
"-",
"1",
"]",
"qret",
"=",
"seq_to_batch",
"(",
"qrets",
",",
"flat",
"=",
"True",
")",
"return",
"qret"
] | Calculates q_retrace targets
:param R: Rewards
:param D: Dones
:param q_i: Q values for actions taken
:param v: V values
:param rho_i: Importance weight for each action
:return: Q_retrace values | [
"Calculates",
"q_retrace",
"targets"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/acer/acer.py#L25-L51 | valid |
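As a reading aid, here is a single-environment NumPy transcription of the same backward Retrace recursion; the array names and shapes are illustrative and not part of the original module.
import numpy as np

def q_retrace_single_env(rewards, dones, q_i, values, rho_i, gamma):
    # rewards, dones, q_i, rho_i have shape [nsteps]; values has shape [nsteps + 1]
    rho_bar = np.minimum(1.0, rho_i)
    qret = values[-1]
    out = np.empty_like(rewards, dtype=np.float64)
    for i in reversed(range(len(rewards))):
        qret = rewards[i] + gamma * qret * (1.0 - dones[i])
        out[i] = qret
        qret = rho_bar[i] * (qret - q_i[i]) + values[i]
    return out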
openai/baselines | baselines/acer/acer.py | learn | def learn(network, env, seed=None, nsteps=20, total_timesteps=int(80e6), q_coef=0.5, ent_coef=0.01,
max_grad_norm=10, lr=7e-4, lrschedule='linear', rprop_epsilon=1e-5, rprop_alpha=0.99, gamma=0.99,
log_interval=100, buffer_size=50000, replay_ratio=4, replay_start=10000, c=10.0,
trust_region=True, alpha=0.99, delta=1, load_path=None, **network_kwargs):
'''
Main entrypoint for ACER (Actor-Critic with Experience Replay) algorithm (https://arxiv.org/pdf/1611.01224.pdf)
Train an agent with given network architecture on a given environment using ACER.
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See baselines.common/policies.py/lstm for more details on using recurrent nets in policies
env: environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel) (default: 20)
nstack: int, size of the frame stack, i.e. number of the frames passed to the step model. Frames are stacked along channel dimension
(last image dimension) (default: 4)
total_timesteps: int, number of timesteps (i.e. number of actions taken in the environment) (default: 80M)
q_coef: float, value function loss coefficient in the optimization objective (analog of vf_coef for other actor-critic methods)
ent_coef: float, policy entropy coefficient in the optimization objective (default: 0.01)
max_grad_norm: float, gradient norm clipping coefficient. If set to None, no clipping. (default: 10),
lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)
lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and
returns fraction of the learning rate (specified as lr) as output
rprop_epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)
rprop_alpha: float, RMSProp decay parameter (default: 0.99)
gamma: float, reward discounting factor (default: 0.99)
log_interval: int, number of updates between logging events (default: 100)
buffer_size: int, size of the replay buffer (default: 50k)
replay_ratio: int, how many (on average) batches of data to sample from the replay buffer per batch collected from the environment (default: 4)
replay_start: int, the sampling from the replay buffer does not start until replay buffer has at least that many samples (default: 10k)
c: float, importance weight clipping factor (default: 10)
trust_region: bool, whether or not the algorithm estimates the gradient KL divergence between the old and updated policy and uses it to determine the step size (default: True)
delta: float, max KL divergence between the old policy and updated policy (default: 1)
alpha: float, momentum factor in the Polyak (exponential moving average) averaging of the model parameters (default: 0.99)
load_path: str, path to load the model from (default: None)
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
print("Running Acer Simple")
print(locals())
set_global_seeds(seed)
if not isinstance(env, VecFrameStack):
env = VecFrameStack(env, 1)
policy = build_policy(env, network, estimate_q=True, **network_kwargs)
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
nstack = env.nstack
model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps,
ent_coef=ent_coef, q_coef=q_coef, gamma=gamma,
max_grad_norm=max_grad_norm, lr=lr, rprop_alpha=rprop_alpha, rprop_epsilon=rprop_epsilon,
total_timesteps=total_timesteps, lrschedule=lrschedule, c=c,
trust_region=trust_region, alpha=alpha, delta=delta)
runner = Runner(env=env, model=model, nsteps=nsteps)
if replay_ratio > 0:
buffer = Buffer(env=env, nsteps=nsteps, size=buffer_size)
else:
buffer = None
nbatch = nenvs*nsteps
acer = Acer(runner, model, buffer, log_interval)
acer.tstart = time.time()
for acer.steps in range(0, total_timesteps, nbatch): #nbatch samples, 1 on_policy call and multiple off-policy calls
acer.call(on_policy=True)
if replay_ratio > 0 and buffer.has_atleast(replay_start):
n = np.random.poisson(replay_ratio)
for _ in range(n):
acer.call(on_policy=False) # no simulation steps in this
return model | python | def learn(network, env, seed=None, nsteps=20, total_timesteps=int(80e6), q_coef=0.5, ent_coef=0.01,
max_grad_norm=10, lr=7e-4, lrschedule='linear', rprop_epsilon=1e-5, rprop_alpha=0.99, gamma=0.99,
log_interval=100, buffer_size=50000, replay_ratio=4, replay_start=10000, c=10.0,
trust_region=True, alpha=0.99, delta=1, load_path=None, **network_kwargs):
'''
Main entrypoint for ACER (Actor-Critic with Experience Replay) algorithm (https://arxiv.org/pdf/1611.01224.pdf)
Train an agent with given network architecture on a given environment using ACER.
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See baselines.common/policies.py/lstm for more details on using recurrent nets in policies
env: environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel) (default: 20)
nstack: int, size of the frame stack, i.e. number of the frames passed to the step model. Frames are stacked along channel dimension
(last image dimension) (default: 4)
total_timesteps: int, number of timesteps (i.e. number of actions taken in the environment) (default: 80M)
q_coef: float, value function loss coefficient in the optimization objective (analog of vf_coef for other actor-critic methods)
ent_coef: float, policy entropy coefficient in the optimization objective (default: 0.01)
max_grad_norm: float, gradient norm clipping coefficient. If set to None, no clipping. (default: 10),
lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)
lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and
returns fraction of the learning rate (specified as lr) as output
rprop_epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)
rprop_alpha: float, RMSProp decay parameter (default: 0.99)
gamma: float, reward discounting factor (default: 0.99)
log_interval: int, number of updates between logging events (default: 100)
buffer_size: int, size of the replay buffer (default: 50k)
replay_ratio: int, how many (on average) batches of data to sample from the replay buffer per batch collected from the environment (default: 4)
replay_start: int, the sampling from the replay buffer does not start until replay buffer has at least that many samples (default: 10k)
c: float, importance weight clipping factor (default: 10)
trust_region: bool, whether or not the algorithm estimates the gradient KL divergence between the old and updated policy and uses it to determine the step size (default: True)
delta: float, max KL divergence between the old policy and updated policy (default: 1)
alpha: float, momentum factor in the Polyak (exponential moving average) averaging of the model parameters (default: 0.99)
load_path: str, path to load the model from (default: None)
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
print("Running Acer Simple")
print(locals())
set_global_seeds(seed)
if not isinstance(env, VecFrameStack):
env = VecFrameStack(env, 1)
policy = build_policy(env, network, estimate_q=True, **network_kwargs)
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
nstack = env.nstack
model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps,
ent_coef=ent_coef, q_coef=q_coef, gamma=gamma,
max_grad_norm=max_grad_norm, lr=lr, rprop_alpha=rprop_alpha, rprop_epsilon=rprop_epsilon,
total_timesteps=total_timesteps, lrschedule=lrschedule, c=c,
trust_region=trust_region, alpha=alpha, delta=delta)
runner = Runner(env=env, model=model, nsteps=nsteps)
if replay_ratio > 0:
buffer = Buffer(env=env, nsteps=nsteps, size=buffer_size)
else:
buffer = None
nbatch = nenvs*nsteps
acer = Acer(runner, model, buffer, log_interval)
acer.tstart = time.time()
for acer.steps in range(0, total_timesteps, nbatch): #nbatch samples, 1 on_policy call and multiple off-policy calls
acer.call(on_policy=True)
if replay_ratio > 0 and buffer.has_atleast(replay_start):
n = np.random.poisson(replay_ratio)
for _ in range(n):
acer.call(on_policy=False) # no simulation steps in this
return model | [
"def",
"learn",
"(",
"network",
",",
"env",
",",
"seed",
"=",
"None",
",",
"nsteps",
"=",
"20",
",",
"total_timesteps",
"=",
"int",
"(",
"80e6",
")",
",",
"q_coef",
"=",
"0.5",
",",
"ent_coef",
"=",
"0.01",
",",
"max_grad_norm",
"=",
"10",
",",
"lr",
"=",
"7e-4",
",",
"lrschedule",
"=",
"'linear'",
",",
"rprop_epsilon",
"=",
"1e-5",
",",
"rprop_alpha",
"=",
"0.99",
",",
"gamma",
"=",
"0.99",
",",
"log_interval",
"=",
"100",
",",
"buffer_size",
"=",
"50000",
",",
"replay_ratio",
"=",
"4",
",",
"replay_start",
"=",
"10000",
",",
"c",
"=",
"10.0",
",",
"trust_region",
"=",
"True",
",",
"alpha",
"=",
"0.99",
",",
"delta",
"=",
"1",
",",
"load_path",
"=",
"None",
",",
"*",
"*",
"network_kwargs",
")",
":",
"print",
"(",
"\"Running Acer Simple\"",
")",
"print",
"(",
"locals",
"(",
")",
")",
"set_global_seeds",
"(",
"seed",
")",
"if",
"not",
"isinstance",
"(",
"env",
",",
"VecFrameStack",
")",
":",
"env",
"=",
"VecFrameStack",
"(",
"env",
",",
"1",
")",
"policy",
"=",
"build_policy",
"(",
"env",
",",
"network",
",",
"estimate_q",
"=",
"True",
",",
"*",
"*",
"network_kwargs",
")",
"nenvs",
"=",
"env",
".",
"num_envs",
"ob_space",
"=",
"env",
".",
"observation_space",
"ac_space",
"=",
"env",
".",
"action_space",
"nstack",
"=",
"env",
".",
"nstack",
"model",
"=",
"Model",
"(",
"policy",
"=",
"policy",
",",
"ob_space",
"=",
"ob_space",
",",
"ac_space",
"=",
"ac_space",
",",
"nenvs",
"=",
"nenvs",
",",
"nsteps",
"=",
"nsteps",
",",
"ent_coef",
"=",
"ent_coef",
",",
"q_coef",
"=",
"q_coef",
",",
"gamma",
"=",
"gamma",
",",
"max_grad_norm",
"=",
"max_grad_norm",
",",
"lr",
"=",
"lr",
",",
"rprop_alpha",
"=",
"rprop_alpha",
",",
"rprop_epsilon",
"=",
"rprop_epsilon",
",",
"total_timesteps",
"=",
"total_timesteps",
",",
"lrschedule",
"=",
"lrschedule",
",",
"c",
"=",
"c",
",",
"trust_region",
"=",
"trust_region",
",",
"alpha",
"=",
"alpha",
",",
"delta",
"=",
"delta",
")",
"runner",
"=",
"Runner",
"(",
"env",
"=",
"env",
",",
"model",
"=",
"model",
",",
"nsteps",
"=",
"nsteps",
")",
"if",
"replay_ratio",
">",
"0",
":",
"buffer",
"=",
"Buffer",
"(",
"env",
"=",
"env",
",",
"nsteps",
"=",
"nsteps",
",",
"size",
"=",
"buffer_size",
")",
"else",
":",
"buffer",
"=",
"None",
"nbatch",
"=",
"nenvs",
"*",
"nsteps",
"acer",
"=",
"Acer",
"(",
"runner",
",",
"model",
",",
"buffer",
",",
"log_interval",
")",
"acer",
".",
"tstart",
"=",
"time",
".",
"time",
"(",
")",
"for",
"acer",
".",
"steps",
"in",
"range",
"(",
"0",
",",
"total_timesteps",
",",
"nbatch",
")",
":",
"#nbatch samples, 1 on_policy call and multiple off-policy calls",
"acer",
".",
"call",
"(",
"on_policy",
"=",
"True",
")",
"if",
"replay_ratio",
">",
"0",
"and",
"buffer",
".",
"has_atleast",
"(",
"replay_start",
")",
":",
"n",
"=",
"np",
".",
"random",
".",
"poisson",
"(",
"replay_ratio",
")",
"for",
"_",
"in",
"range",
"(",
"n",
")",
":",
"acer",
".",
"call",
"(",
"on_policy",
"=",
"False",
")",
"# no simulation steps in this",
"return",
"model"
] | Main entrypoint for ACER (Actor-Critic with Experience Replay) algorithm (https://arxiv.org/pdf/1611.01224.pdf)
Train an agent with given network architecture on a given environment using ACER.
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See baselines.common/policies.py/lstm for more details on using recurrent nets in policies
env: environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel) (default: 20)
nstack: int, size of the frame stack, i.e. number of the frames passed to the step model. Frames are stacked along channel dimension
(last image dimension) (default: 4)
total_timesteps: int, number of timesteps (i.e. number of actions taken in the environment) (default: 80M)
q_coef: float, value function loss coefficient in the optimization objective (analog of vf_coef for other actor-critic methods)
ent_coef: float, policy entropy coefficient in the optimization objective (default: 0.01)
max_grad_norm: float, gradient norm clipping coefficient. If set to None, no clipping. (default: 10),
lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)
lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and
returns fraction of the learning rate (specified as lr) as output
rprop_epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)
rprop_alpha: float, RMSProp decay parameter (default: 0.99)
gamma: float, reward discounting factor (default: 0.99)
log_interval: int, number of updates between logging events (default: 100)
buffer_size: int, size of the replay buffer (default: 50k)
replay_ratio: int, how many (on average) batches of data to sample from the replay buffer per batch collected from the environment (default: 4)
replay_start: int, the sampling from the replay buffer does not start until replay buffer has at least that many samples (default: 10k)
c: float, importance weight clipping factor (default: 10)
trust_region: bool, whether or not the algorithm estimates the gradient KL divergence between the old and updated policy and uses it to determine the step size (default: True)
delta: float, max KL divergence between the old policy and updated policy (default: 1)
alpha: float, momentum factor in the Polyak (exponential moving average) averaging of the model parameters (default: 0.99)
load_path: str, path to load the model from (default: None)
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers. | [
"Main",
"entrypoint",
"for",
"ACER",
"(",
"Actor",
"-",
"Critic",
"with",
"Experience",
"Replay",
")",
"algorithm",
"(",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"pdf",
"/",
"1611",
".",
"01224",
".",
"pdf",
")",
"Train",
"an",
"agent",
"with",
"given",
"network",
"architecture",
"on",
"a",
"given",
"environment",
"using",
"ACER",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/acer/acer.py#L274-L377 | valid |
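A hedged invocation sketch for the ACER learn entry point above. The environment construction is simplified (a real Atari run would apply the standard wrappers), and the hyperparameters are placeholders rather than recommended settings.
import gym
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv

env = DummyVecEnv([lambda: gym.make('PongNoFrameskip-v4') for _ in range(4)])
model = learn(network='cnn', env=env, seed=0, total_timesteps=int(1e6), replay_ratio=4)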
openai/baselines | baselines/acktr/kfac.py | KfacOptimizer.apply_stats | def apply_stats(self, statsUpdates):
""" compute stats and update/apply the new stats to the running average
"""
def updateAccumStats():
if self._full_stats_init:
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)), tf.no_op)
else:
return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter))
def updateRunningAvgStats(statsUpdates, fac_iter=1):
# return tf.cond(tf.greater_equal(self.factor_step,
# tf.convert_to_tensor(fac_iter)), lambda:
# tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op)
return tf.group(*self._apply_stats(statsUpdates))
if self._async_stats:
# asynchronous stats update
update_stats = self._apply_stats(statsUpdates)
queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[
item.get_shape() for item in update_stats])
enqueue_op = queue.enqueue(update_stats)
def dequeue_stats_op():
return queue.dequeue()
self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])
update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(
0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ]))
else:
# synchronous stats update
update_stats_op = tf.cond(tf.greater_equal(
self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats)
self._update_stats_op = update_stats_op
return update_stats_op | python | def apply_stats(self, statsUpdates):
""" compute stats and update/apply the new stats to the running average
"""
def updateAccumStats():
if self._full_stats_init:
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)), tf.no_op)
else:
return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter))
def updateRunningAvgStats(statsUpdates, fac_iter=1):
# return tf.cond(tf.greater_equal(self.factor_step,
# tf.convert_to_tensor(fac_iter)), lambda:
# tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op)
return tf.group(*self._apply_stats(statsUpdates))
if self._async_stats:
# asynchronous stats update
update_stats = self._apply_stats(statsUpdates)
queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[
item.get_shape() for item in update_stats])
enqueue_op = queue.enqueue(update_stats)
def dequeue_stats_op():
return queue.dequeue()
self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])
update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(
0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ]))
else:
# synchronous stats update
update_stats_op = tf.cond(tf.greater_equal(
self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats)
self._update_stats_op = update_stats_op
return update_stats_op | [
"def",
"apply_stats",
"(",
"self",
",",
"statsUpdates",
")",
":",
"def",
"updateAccumStats",
"(",
")",
":",
"if",
"self",
".",
"_full_stats_init",
":",
"return",
"tf",
".",
"cond",
"(",
"tf",
".",
"greater",
"(",
"self",
".",
"sgd_step",
",",
"self",
".",
"_cold_iter",
")",
",",
"lambda",
":",
"tf",
".",
"group",
"(",
"*",
"self",
".",
"_apply_stats",
"(",
"statsUpdates",
",",
"accumulate",
"=",
"True",
",",
"accumulateCoeff",
"=",
"1.",
"/",
"self",
".",
"_stats_accum_iter",
")",
")",
",",
"tf",
".",
"no_op",
")",
"else",
":",
"return",
"tf",
".",
"group",
"(",
"*",
"self",
".",
"_apply_stats",
"(",
"statsUpdates",
",",
"accumulate",
"=",
"True",
",",
"accumulateCoeff",
"=",
"1.",
"/",
"self",
".",
"_stats_accum_iter",
")",
")",
"def",
"updateRunningAvgStats",
"(",
"statsUpdates",
",",
"fac_iter",
"=",
"1",
")",
":",
"# return tf.cond(tf.greater_equal(self.factor_step,",
"# tf.convert_to_tensor(fac_iter)), lambda:",
"# tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op)",
"return",
"tf",
".",
"group",
"(",
"*",
"self",
".",
"_apply_stats",
"(",
"statsUpdates",
")",
")",
"if",
"self",
".",
"_async_stats",
":",
"# asynchronous stats update",
"update_stats",
"=",
"self",
".",
"_apply_stats",
"(",
"statsUpdates",
")",
"queue",
"=",
"tf",
".",
"FIFOQueue",
"(",
"1",
",",
"[",
"item",
".",
"dtype",
"for",
"item",
"in",
"update_stats",
"]",
",",
"shapes",
"=",
"[",
"item",
".",
"get_shape",
"(",
")",
"for",
"item",
"in",
"update_stats",
"]",
")",
"enqueue_op",
"=",
"queue",
".",
"enqueue",
"(",
"update_stats",
")",
"def",
"dequeue_stats_op",
"(",
")",
":",
"return",
"queue",
".",
"dequeue",
"(",
")",
"self",
".",
"qr_stats",
"=",
"tf",
".",
"train",
".",
"QueueRunner",
"(",
"queue",
",",
"[",
"enqueue_op",
"]",
")",
"update_stats_op",
"=",
"tf",
".",
"cond",
"(",
"tf",
".",
"equal",
"(",
"queue",
".",
"size",
"(",
")",
",",
"tf",
".",
"convert_to_tensor",
"(",
"0",
")",
")",
",",
"tf",
".",
"no_op",
",",
"lambda",
":",
"tf",
".",
"group",
"(",
"*",
"[",
"dequeue_stats_op",
"(",
")",
",",
"]",
")",
")",
"else",
":",
"# synchronous stats update",
"update_stats_op",
"=",
"tf",
".",
"cond",
"(",
"tf",
".",
"greater_equal",
"(",
"self",
".",
"stats_step",
",",
"self",
".",
"_stats_accum_iter",
")",
",",
"lambda",
":",
"updateRunningAvgStats",
"(",
"statsUpdates",
")",
",",
"updateAccumStats",
")",
"self",
".",
"_update_stats_op",
"=",
"update_stats_op",
"return",
"update_stats_op"
] | compute stats and update/apply the new stats to the running average | [
"compute",
"stats",
"and",
"update",
"/",
"apply",
"the",
"new",
"stats",
"to",
"the",
"running",
"average"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/acktr/kfac.py#L440-L474 | valid |
openai/baselines | baselines/common/tile_images.py | tile_images | def tile_images(img_nhwc):
"""
Tile N images into one big PxQ image
(P,Q) are chosen to be as close as possible, and if N
is square, then P=Q.
input: img_nhwc, list or array of images, ndim=4 once turned into array
n = batch index, h = height, w = width, c = channel
returns:
bigim_HWc, ndarray with ndim=3
"""
img_nhwc = np.asarray(img_nhwc)
N, h, w, c = img_nhwc.shape
H = int(np.ceil(np.sqrt(N)))
W = int(np.ceil(float(N)/H))
img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])
img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)
return img_Hh_Ww_c | python | def tile_images(img_nhwc):
"""
Tile N images into one big PxQ image
(P,Q) are chosen to be as close as possible, and if N
is square, then P=Q.
input: img_nhwc, list or array of images, ndim=4 once turned into array
n = batch index, h = height, w = width, c = channel
returns:
bigim_HWc, ndarray with ndim=3
"""
img_nhwc = np.asarray(img_nhwc)
N, h, w, c = img_nhwc.shape
H = int(np.ceil(np.sqrt(N)))
W = int(np.ceil(float(N)/H))
img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])
img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)
return img_Hh_Ww_c | [
"def",
"tile_images",
"(",
"img_nhwc",
")",
":",
"img_nhwc",
"=",
"np",
".",
"asarray",
"(",
"img_nhwc",
")",
"N",
",",
"h",
",",
"w",
",",
"c",
"=",
"img_nhwc",
".",
"shape",
"H",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"np",
".",
"sqrt",
"(",
"N",
")",
")",
")",
"W",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"float",
"(",
"N",
")",
"/",
"H",
")",
")",
"img_nhwc",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"img_nhwc",
")",
"+",
"[",
"img_nhwc",
"[",
"0",
"]",
"*",
"0",
"for",
"_",
"in",
"range",
"(",
"N",
",",
"H",
"*",
"W",
")",
"]",
")",
"img_HWhwc",
"=",
"img_nhwc",
".",
"reshape",
"(",
"H",
",",
"W",
",",
"h",
",",
"w",
",",
"c",
")",
"img_HhWwc",
"=",
"img_HWhwc",
".",
"transpose",
"(",
"0",
",",
"2",
",",
"1",
",",
"3",
",",
"4",
")",
"img_Hh_Ww_c",
"=",
"img_HhWwc",
".",
"reshape",
"(",
"H",
"*",
"h",
",",
"W",
"*",
"w",
",",
"c",
")",
"return",
"img_Hh_Ww_c"
] | Tile N images into one big PxQ image
(P,Q) are chosen to be as close as possible, and if N
is square, then P=Q.
input: img_nhwc, list or array of images, ndim=4 once turned into array
n = batch index, h = height, w = width, c = channel
returns:
bigim_HWc, ndarray with ndim=3 | [
"Tile",
"N",
"images",
"into",
"one",
"big",
"PxQ",
"image",
"(",
"P",
"Q",
")",
"are",
"chosen",
"to",
"be",
"as",
"close",
"as",
"possible",
"and",
"if",
"N",
"is",
"square",
"then",
"P",
"=",
"Q",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/tile_images.py#L3-L22 | valid |
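A quick shape check for tile_images; the random images below are just an example.
import numpy as np
imgs = np.random.randint(0, 256, size=(5, 32, 32, 3), dtype=np.uint8)  # N=5 images of 32x32x3
big = tile_images(imgs)
# 5 images are padded out to a 3x2 grid (one blank slot), so big.shape == (96, 64, 3)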
openai/baselines | baselines/common/segment_tree.py | SumSegmentTree.sum | def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end) | python | def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end) | [
"def",
"sum",
"(",
"self",
",",
"start",
"=",
"0",
",",
"end",
"=",
"None",
")",
":",
"return",
"super",
"(",
"SumSegmentTree",
",",
"self",
")",
".",
"reduce",
"(",
"start",
",",
"end",
")"
] | Returns arr[start] + ... + arr[end] | [
"Returns",
"arr",
"[",
"start",
"]",
"+",
"...",
"+",
"arr",
"[",
"end",
"]"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/segment_tree.py#L101-L103 | valid |
openai/baselines | baselines/common/segment_tree.py | SumSegmentTree.find_prefixsum_idx | def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows sampling indexes according to the discrete
probability efficiently.
Parameters
----------
prefixsum: float
upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity | python | def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows sampling indexes according to the discrete
probability efficiently.
Parameters
----------
prefixsum: float
upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity | [
"def",
"find_prefixsum_idx",
"(",
"self",
",",
"prefixsum",
")",
":",
"assert",
"0",
"<=",
"prefixsum",
"<=",
"self",
".",
"sum",
"(",
")",
"+",
"1e-5",
"idx",
"=",
"1",
"while",
"idx",
"<",
"self",
".",
"_capacity",
":",
"# while non-leaf",
"if",
"self",
".",
"_value",
"[",
"2",
"*",
"idx",
"]",
">",
"prefixsum",
":",
"idx",
"=",
"2",
"*",
"idx",
"else",
":",
"prefixsum",
"-=",
"self",
".",
"_value",
"[",
"2",
"*",
"idx",
"]",
"idx",
"=",
"2",
"*",
"idx",
"+",
"1",
"return",
"idx",
"-",
"self",
".",
"_capacity"
] | Find the highest index `i` in the array such that
sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows sampling indexes according to the discrete
probability efficiently.
Parameters
----------
prefixsum: float
upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint | [
"Find",
"the",
"highest",
"index",
"i",
"in",
"the",
"array",
"such",
"that",
"sum",
"(",
"arr",
"[",
"0",
"]",
"+",
"arr",
"[",
"1",
"]",
"+",
"...",
"+",
"arr",
"[",
"i",
"-",
"i",
"]",
")",
"<",
"=",
"prefixsum"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/segment_tree.py#L105-L131 | valid |
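A small sketch of proportional sampling with find_prefixsum_idx, in the way the prioritized replay buffer uses it; the capacity and priorities below are illustrative.
import random
from baselines.common.segment_tree import SumSegmentTree

tree = SumSegmentTree(8)             # capacity must be a power of two
for i, p in enumerate([0.1, 0.4, 0.2, 0.3]):
    tree[i] = p
mass = random.random() * tree.sum()  # tree.sum() == 1.0 here
idx = tree.find_prefixsum_idx(mass)  # index drawn with probability proportional to its value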
openai/baselines | baselines/common/segment_tree.py | MinSegmentTree.min | def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end) | python | def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end) | [
"def",
"min",
"(",
"self",
",",
"start",
"=",
"0",
",",
"end",
"=",
"None",
")",
":",
"return",
"super",
"(",
"MinSegmentTree",
",",
"self",
")",
".",
"reduce",
"(",
"start",
",",
"end",
")"
] | Returns min(arr[start], ..., arr[end]) | [
"Returns",
"min",
"(",
"arr",
"[",
"start",
"]",
"...",
"arr",
"[",
"end",
"]",
")"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/segment_tree.py#L142-L145 | valid |
openai/baselines | baselines/common/schedules.py | PiecewiseSchedule.value | def value(self, t):
"""See Schedule.value"""
for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
if l_t <= t and t < r_t:
alpha = float(t - l_t) / (r_t - l_t)
return self._interpolation(l, r, alpha)
# t does not belong to any of the pieces, so doom.
assert self._outside_value is not None
return self._outside_value | python | def value(self, t):
"""See Schedule.value"""
for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
if l_t <= t and t < r_t:
alpha = float(t - l_t) / (r_t - l_t)
return self._interpolation(l, r, alpha)
# t does not belong to any of the pieces, so doom.
assert self._outside_value is not None
return self._outside_value | [
"def",
"value",
"(",
"self",
",",
"t",
")",
":",
"for",
"(",
"l_t",
",",
"l",
")",
",",
"(",
"r_t",
",",
"r",
")",
"in",
"zip",
"(",
"self",
".",
"_endpoints",
"[",
":",
"-",
"1",
"]",
",",
"self",
".",
"_endpoints",
"[",
"1",
":",
"]",
")",
":",
"if",
"l_t",
"<=",
"t",
"and",
"t",
"<",
"r_t",
":",
"alpha",
"=",
"float",
"(",
"t",
"-",
"l_t",
")",
"/",
"(",
"r_t",
"-",
"l_t",
")",
"return",
"self",
".",
"_interpolation",
"(",
"l",
",",
"r",
",",
"alpha",
")",
"# t does not belong to any of the pieces, so doom.",
"assert",
"self",
".",
"_outside_value",
"is",
"not",
"None",
"return",
"self",
".",
"_outside_value"
] | See Schedule.value | [
"See",
"Schedule",
".",
"value"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/schedules.py#L64-L73 | valid |
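An illustrative schedule showing how value interpolates between endpoints and falls back to outside_value; the numbers are arbitrary.
from baselines.common.schedules import PiecewiseSchedule

sched = PiecewiseSchedule([(0, 1.0), (1000000, 0.1), (2000000, 0.01)], outside_value=0.01)
sched.value(0)        # 1.0
sched.value(500000)   # 0.55 -- halfway between 1.0 and 0.1
sched.value(3000000)  # 0.01 -- past the last endpoint, so outside_value is returned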
openai/baselines | baselines/common/vec_env/shmem_vec_env.py | _subproc_worker | def _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys):
"""
Control a single environment instance using IPC and
shared memory.
"""
def _write_obs(maybe_dict_obs):
flatdict = obs_to_dict(maybe_dict_obs)
for k in keys:
dst = obs_bufs[k].get_obj()
dst_np = np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(obs_shapes[k]) # pylint: disable=W0212
np.copyto(dst_np, flatdict[k])
env = env_fn_wrapper.x()
parent_pipe.close()
try:
while True:
cmd, data = pipe.recv()
if cmd == 'reset':
pipe.send(_write_obs(env.reset()))
elif cmd == 'step':
obs, reward, done, info = env.step(data)
if done:
obs = env.reset()
pipe.send((_write_obs(obs), reward, done, info))
elif cmd == 'render':
pipe.send(env.render(mode='rgb_array'))
elif cmd == 'close':
pipe.send(None)
break
else:
raise RuntimeError('Got unrecognized cmd %s' % cmd)
except KeyboardInterrupt:
print('ShmemVecEnv worker: got KeyboardInterrupt')
finally:
env.close() | python | def _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys):
"""
Control a single environment instance using IPC and
shared memory.
"""
def _write_obs(maybe_dict_obs):
flatdict = obs_to_dict(maybe_dict_obs)
for k in keys:
dst = obs_bufs[k].get_obj()
dst_np = np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(obs_shapes[k]) # pylint: disable=W0212
np.copyto(dst_np, flatdict[k])
env = env_fn_wrapper.x()
parent_pipe.close()
try:
while True:
cmd, data = pipe.recv()
if cmd == 'reset':
pipe.send(_write_obs(env.reset()))
elif cmd == 'step':
obs, reward, done, info = env.step(data)
if done:
obs = env.reset()
pipe.send((_write_obs(obs), reward, done, info))
elif cmd == 'render':
pipe.send(env.render(mode='rgb_array'))
elif cmd == 'close':
pipe.send(None)
break
else:
raise RuntimeError('Got unrecognized cmd %s' % cmd)
except KeyboardInterrupt:
print('ShmemVecEnv worker: got KeyboardInterrupt')
finally:
env.close() | [
"def",
"_subproc_worker",
"(",
"pipe",
",",
"parent_pipe",
",",
"env_fn_wrapper",
",",
"obs_bufs",
",",
"obs_shapes",
",",
"obs_dtypes",
",",
"keys",
")",
":",
"def",
"_write_obs",
"(",
"maybe_dict_obs",
")",
":",
"flatdict",
"=",
"obs_to_dict",
"(",
"maybe_dict_obs",
")",
"for",
"k",
"in",
"keys",
":",
"dst",
"=",
"obs_bufs",
"[",
"k",
"]",
".",
"get_obj",
"(",
")",
"dst_np",
"=",
"np",
".",
"frombuffer",
"(",
"dst",
",",
"dtype",
"=",
"obs_dtypes",
"[",
"k",
"]",
")",
".",
"reshape",
"(",
"obs_shapes",
"[",
"k",
"]",
")",
"# pylint: disable=W0212",
"np",
".",
"copyto",
"(",
"dst_np",
",",
"flatdict",
"[",
"k",
"]",
")",
"env",
"=",
"env_fn_wrapper",
".",
"x",
"(",
")",
"parent_pipe",
".",
"close",
"(",
")",
"try",
":",
"while",
"True",
":",
"cmd",
",",
"data",
"=",
"pipe",
".",
"recv",
"(",
")",
"if",
"cmd",
"==",
"'reset'",
":",
"pipe",
".",
"send",
"(",
"_write_obs",
"(",
"env",
".",
"reset",
"(",
")",
")",
")",
"elif",
"cmd",
"==",
"'step'",
":",
"obs",
",",
"reward",
",",
"done",
",",
"info",
"=",
"env",
".",
"step",
"(",
"data",
")",
"if",
"done",
":",
"obs",
"=",
"env",
".",
"reset",
"(",
")",
"pipe",
".",
"send",
"(",
"(",
"_write_obs",
"(",
"obs",
")",
",",
"reward",
",",
"done",
",",
"info",
")",
")",
"elif",
"cmd",
"==",
"'render'",
":",
"pipe",
".",
"send",
"(",
"env",
".",
"render",
"(",
"mode",
"=",
"'rgb_array'",
")",
")",
"elif",
"cmd",
"==",
"'close'",
":",
"pipe",
".",
"send",
"(",
"None",
")",
"break",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Got unrecognized cmd %s'",
"%",
"cmd",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"'ShmemVecEnv worker: got KeyboardInterrupt'",
")",
"finally",
":",
"env",
".",
"close",
"(",
")"
] | Control a single environment instance using IPC and
shared memory. | [
"Control",
"a",
"single",
"environment",
"instance",
"using",
"IPC",
"and",
"shared",
"memory",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/vec_env/shmem_vec_env.py#L105-L139 | valid |
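The worker above is driven by the public ShmemVecEnv class; a hedged usage sketch follows, with the environment id and number of copies chosen only for illustration.
import gym
from baselines.common.vec_env.shmem_vec_env import ShmemVecEnv

venv = ShmemVecEnv([lambda: gym.make('CartPole-v0') for _ in range(4)])
obs = venv.reset()                                    # batched observations, shape (4, 4)
actions = [venv.action_space.sample() for _ in range(4)]
obs, rewards, dones, infos = venv.step(actions)
venv.close()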
openai/baselines | baselines/a2c/a2c.py | learn | def learn(
network,
env,
seed=None,
nsteps=5,
total_timesteps=int(80e6),
vf_coef=0.5,
ent_coef=0.01,
max_grad_norm=0.5,
lr=7e-4,
lrschedule='linear',
epsilon=1e-5,
alpha=0.99,
gamma=0.99,
log_interval=100,
load_path=None,
**network_kwargs):
'''
Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm.
Parameters:
-----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See baselines.common/policies.py/lstm for more details on using recurrent nets in policies
env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py)
seed: seed to make the random number sequence in the algorithm reproducible. Defaults to None, which means the seed is taken from the system noise generator (not reproducible)
nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int, total number of timesteps to train on (default: 80M)
vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5)
ent_coef: float, coefficient in front of the policy entropy in the total loss function (default: 0.01)
max_grad_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5)
lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)
lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and
returns fraction of the learning rate (specified as lr) as output
epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)
alpha: float, RMSProp decay parameter (default: 0.99)
gamma: float, reward discounting parameter (default: 0.99)
log_interval: int, specifies how frequently the logs are printed out (default: 100)
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
# Get the nb of env
nenvs = env.num_envs
policy = build_policy(env, network, **network_kwargs)
# Instantiate the model object (that creates step_model and train_model)
model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env, model, nsteps=nsteps, gamma=gamma)
epinfobuf = deque(maxlen=100)
# Calculate the batch_size
nbatch = nenvs*nsteps
# Start total timer
tstart = time.time()
for update in range(1, total_timesteps//nbatch+1):
# Get mini batch of experiences
obs, states, rewards, masks, actions, values, epinfos = runner.run()
epinfobuf.extend(epinfos)
policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)
nseconds = time.time()-tstart
# Calculate the fps (frame per second)
fps = int((update*nbatch)/nseconds)
if update % log_interval == 0 or update == 1:
# Check whether the value function is a good predictor of the returns (ev close to 1)
# or worse than predicting nothing (ev <= 0)
ev = explained_variance(values, rewards)
logger.record_tabular("nupdates", update)
logger.record_tabular("total_timesteps", update*nbatch)
logger.record_tabular("fps", fps)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("value_loss", float(value_loss))
logger.record_tabular("explained_variance", float(ev))
logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.dump_tabular()
return model | python | def learn(
network,
env,
seed=None,
nsteps=5,
total_timesteps=int(80e6),
vf_coef=0.5,
ent_coef=0.01,
max_grad_norm=0.5,
lr=7e-4,
lrschedule='linear',
epsilon=1e-5,
alpha=0.99,
gamma=0.99,
log_interval=100,
load_path=None,
**network_kwargs):
'''
Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm.
Parameters:
-----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See baselines.common/policies.py/lstm for more details on using recurrent nets in policies
env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py)
seed: seed to make the random number sequence in the algorithm reproducible. Defaults to None, which means the seed is taken from the system noise generator (not reproducible)
nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int, total number of timesteps to train on (default: 80M)
vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5)
ent_coef: float, coefficient in front of the policy entropy in the total loss function (default: 0.01)
max_grad_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5)
lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)
lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and
returns fraction of the learning rate (specified as lr) as output
epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)
alpha: float, RMSProp decay parameter (default: 0.99)
gamma: float, reward discounting parameter (default: 0.99)
log_interval: int, specifies how frequently the logs are printed out (default: 100)
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
# Get the nb of env
nenvs = env.num_envs
policy = build_policy(env, network, **network_kwargs)
# Instantiate the model object (that creates step_model and train_model)
model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env, model, nsteps=nsteps, gamma=gamma)
epinfobuf = deque(maxlen=100)
# Calculate the batch_size
nbatch = nenvs*nsteps
# Start total timer
tstart = time.time()
for update in range(1, total_timesteps//nbatch+1):
# Get mini batch of experiences
obs, states, rewards, masks, actions, values, epinfos = runner.run()
epinfobuf.extend(epinfos)
policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)
nseconds = time.time()-tstart
# Calculate the fps (frame per second)
fps = int((update*nbatch)/nseconds)
if update % log_interval == 0 or update == 1:
# Check whether the value function is a good predictor of the returns (ev close to 1)
# or worse than predicting nothing (ev <= 0)
ev = explained_variance(values, rewards)
logger.record_tabular("nupdates", update)
logger.record_tabular("total_timesteps", update*nbatch)
logger.record_tabular("fps", fps)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("value_loss", float(value_loss))
logger.record_tabular("explained_variance", float(ev))
logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.dump_tabular()
return model | [
"def",
"learn",
"(",
"network",
",",
"env",
",",
"seed",
"=",
"None",
",",
"nsteps",
"=",
"5",
",",
"total_timesteps",
"=",
"int",
"(",
"80e6",
")",
",",
"vf_coef",
"=",
"0.5",
",",
"ent_coef",
"=",
"0.01",
",",
"max_grad_norm",
"=",
"0.5",
",",
"lr",
"=",
"7e-4",
",",
"lrschedule",
"=",
"'linear'",
",",
"epsilon",
"=",
"1e-5",
",",
"alpha",
"=",
"0.99",
",",
"gamma",
"=",
"0.99",
",",
"log_interval",
"=",
"100",
",",
"load_path",
"=",
"None",
",",
"*",
"*",
"network_kwargs",
")",
":",
"set_global_seeds",
"(",
"seed",
")",
"# Get the nb of env",
"nenvs",
"=",
"env",
".",
"num_envs",
"policy",
"=",
"build_policy",
"(",
"env",
",",
"network",
",",
"*",
"*",
"network_kwargs",
")",
"# Instantiate the model object (that creates step_model and train_model)",
"model",
"=",
"Model",
"(",
"policy",
"=",
"policy",
",",
"env",
"=",
"env",
",",
"nsteps",
"=",
"nsteps",
",",
"ent_coef",
"=",
"ent_coef",
",",
"vf_coef",
"=",
"vf_coef",
",",
"max_grad_norm",
"=",
"max_grad_norm",
",",
"lr",
"=",
"lr",
",",
"alpha",
"=",
"alpha",
",",
"epsilon",
"=",
"epsilon",
",",
"total_timesteps",
"=",
"total_timesteps",
",",
"lrschedule",
"=",
"lrschedule",
")",
"if",
"load_path",
"is",
"not",
"None",
":",
"model",
".",
"load",
"(",
"load_path",
")",
"# Instantiate the runner object",
"runner",
"=",
"Runner",
"(",
"env",
",",
"model",
",",
"nsteps",
"=",
"nsteps",
",",
"gamma",
"=",
"gamma",
")",
"epinfobuf",
"=",
"deque",
"(",
"maxlen",
"=",
"100",
")",
"# Calculate the batch_size",
"nbatch",
"=",
"nenvs",
"*",
"nsteps",
"# Start total timer",
"tstart",
"=",
"time",
".",
"time",
"(",
")",
"for",
"update",
"in",
"range",
"(",
"1",
",",
"total_timesteps",
"//",
"nbatch",
"+",
"1",
")",
":",
"# Get mini batch of experiences",
"obs",
",",
"states",
",",
"rewards",
",",
"masks",
",",
"actions",
",",
"values",
",",
"epinfos",
"=",
"runner",
".",
"run",
"(",
")",
"epinfobuf",
".",
"extend",
"(",
"epinfos",
")",
"policy_loss",
",",
"value_loss",
",",
"policy_entropy",
"=",
"model",
".",
"train",
"(",
"obs",
",",
"states",
",",
"rewards",
",",
"masks",
",",
"actions",
",",
"values",
")",
"nseconds",
"=",
"time",
".",
"time",
"(",
")",
"-",
"tstart",
"# Calculate the fps (frame per second)",
"fps",
"=",
"int",
"(",
"(",
"update",
"*",
"nbatch",
")",
"/",
"nseconds",
")",
"if",
"update",
"%",
"log_interval",
"==",
"0",
"or",
"update",
"==",
"1",
":",
"# Calculates if value function is a good predicator of the returns (ev > 1)",
"# or if it's just worse than predicting nothing (ev =< 0)",
"ev",
"=",
"explained_variance",
"(",
"values",
",",
"rewards",
")",
"logger",
".",
"record_tabular",
"(",
"\"nupdates\"",
",",
"update",
")",
"logger",
".",
"record_tabular",
"(",
"\"total_timesteps\"",
",",
"update",
"*",
"nbatch",
")",
"logger",
".",
"record_tabular",
"(",
"\"fps\"",
",",
"fps",
")",
"logger",
".",
"record_tabular",
"(",
"\"policy_entropy\"",
",",
"float",
"(",
"policy_entropy",
")",
")",
"logger",
".",
"record_tabular",
"(",
"\"value_loss\"",
",",
"float",
"(",
"value_loss",
")",
")",
"logger",
".",
"record_tabular",
"(",
"\"explained_variance\"",
",",
"float",
"(",
"ev",
")",
")",
"logger",
".",
"record_tabular",
"(",
"\"eprewmean\"",
",",
"safemean",
"(",
"[",
"epinfo",
"[",
"'r'",
"]",
"for",
"epinfo",
"in",
"epinfobuf",
"]",
")",
")",
"logger",
".",
"record_tabular",
"(",
"\"eplenmean\"",
",",
"safemean",
"(",
"[",
"epinfo",
"[",
"'l'",
"]",
"for",
"epinfo",
"in",
"epinfobuf",
"]",
")",
")",
"logger",
".",
"dump_tabular",
"(",
")",
"return",
"model"
] | Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm.
Parameters:
-----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See baselines.common/policies.py/lstm for more details on using recurrent nets in policies
env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py)
seed: seed to make the random number sequence in the algorithm reproducible. Defaults to None, which means the seed is taken from the system noise generator (not reproducible)
nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int, total number of timesteps to train on (default: 80M)
vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5)
ent_coef: float, coefficient in front of the policy entropy in the total loss function (default: 0.01)
max_grad_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5)
lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)
lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and
returns fraction of the learning rate (specified as lr) as output
epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)
alpha: float, RMSProp decay parameter (default: 0.99)
gamma: float, reward discounting parameter (default: 0.99)
log_interval: int, specifies how frequently the logs are printed out (default: 100)
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers. | [
"Main",
"entrypoint",
"for",
"A2C",
"algorithm",
".",
"Train",
"a",
"policy",
"with",
"given",
"network",
"architecture",
"on",
"a",
"given",
"environment",
"using",
"a2c",
"algorithm",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/a2c/a2c.py#L119-L231 | valid |
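An illustrative call mirroring the docstring above; the environment and hyperparameters are placeholders rather than recommended settings.
import gym
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv

env = DummyVecEnv([lambda: gym.make('CartPole-v0') for _ in range(4)])
model = learn(network='mlp', env=env, seed=0, nsteps=5, total_timesteps=50000)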
openai/baselines | baselines/ppo2/runner.py | sf01 | def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:]) | python | def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:]) | [
"def",
"sf01",
"(",
"arr",
")",
":",
"s",
"=",
"arr",
".",
"shape",
"return",
"arr",
".",
"swapaxes",
"(",
"0",
",",
"1",
")",
".",
"reshape",
"(",
"s",
"[",
"0",
"]",
"*",
"s",
"[",
"1",
"]",
",",
"*",
"s",
"[",
"2",
":",
"]",
")"
] | swap and then flatten axes 0 and 1 | [
"swap",
"and",
"then",
"flatten",
"axes",
"0",
"and",
"1"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/ppo2/runner.py#L69-L74 | valid |
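A minimal numpy sketch of what sf01 does to a rollout batch (a standalone reimplementation for illustration, since importing baselines.ppo2.runner pulls in TensorFlow): axes 0 and 1 — typically (nsteps, nenvs) — are swapped and then merged into a single batch axis.

import numpy as np

def sf01_demo(arr):
    # same rule as sf01: swap axes 0 and 1, then merge them into one leading axis
    s = arr.shape
    return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])

x = np.arange(2 * 3 * 4).reshape(2, 3, 4)   # (nsteps=2, nenvs=3, obs_dim=4)
y = sf01_demo(x)
print(y.shape)                               # (6, 4): rows are now grouped per environment
assert (y[0] == x[0, 0]).all() and (y[1] == x[1, 0]).all()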
openai/baselines | baselines/common/policies.py | PolicyWithValue.step | def step(self, observation, **extra_feed):
"""
Compute next action(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
-------
(action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
"""
a, v, state, neglogp = self._evaluate([self.action, self.vf, self.state, self.neglogp], observation, **extra_feed)
if state.size == 0:
state = None
return a, v, state, neglogp | python | def step(self, observation, **extra_feed):
"""
Compute next action(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
-------
(action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
"""
a, v, state, neglogp = self._evaluate([self.action, self.vf, self.state, self.neglogp], observation, **extra_feed)
if state.size == 0:
state = None
return a, v, state, neglogp | [
"def",
"step",
"(",
"self",
",",
"observation",
",",
"*",
"*",
"extra_feed",
")",
":",
"a",
",",
"v",
",",
"state",
",",
"neglogp",
"=",
"self",
".",
"_evaluate",
"(",
"[",
"self",
".",
"action",
",",
"self",
".",
"vf",
",",
"self",
".",
"state",
",",
"self",
".",
"neglogp",
"]",
",",
"observation",
",",
"*",
"*",
"extra_feed",
")",
"if",
"state",
".",
"size",
"==",
"0",
":",
"state",
"=",
"None",
"return",
"a",
",",
"v",
",",
"state",
",",
"neglogp"
] | Compute next action(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
-------
(action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple | [
"Compute",
"next",
"action",
"(",
"s",
")",
"given",
"the",
"observation",
"(",
"s",
")"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/policies.py#L77-L96 | valid |
openai/baselines | baselines/common/policies.py | PolicyWithValue.value | def value(self, ob, *args, **kwargs):
"""
Compute value estimate(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
-------
value estimate
"""
return self._evaluate(self.vf, ob, *args, **kwargs) | python | def value(self, ob, *args, **kwargs):
"""
Compute value estimate(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
-------
value estimate
"""
return self._evaluate(self.vf, ob, *args, **kwargs) | [
"def",
"value",
"(",
"self",
",",
"ob",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_evaluate",
"(",
"self",
".",
"vf",
",",
"ob",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Compute value estimate(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
-------
value estimate | [
"Compute",
"value",
"estimate",
"(",
"s",
")",
"given",
"the",
"observation",
"(",
"s",
")"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/policies.py#L98-L113 | valid |
openai/baselines | baselines/common/misc_util.py | pretty_eta | def pretty_eta(seconds_left):
"""Print the number of seconds in human readable format.
Examples:
2 days
2 hours and 37 minutes
less than a minute
    Parameters
    ----------
seconds_left: int
Number of seconds to be converted to the ETA
Returns
-------
eta: str
String representing the pretty ETA.
"""
minutes_left = seconds_left // 60
seconds_left %= 60
hours_left = minutes_left // 60
minutes_left %= 60
days_left = hours_left // 24
hours_left %= 24
def helper(cnt, name):
return "{} {}{}".format(str(cnt), name, ('s' if cnt > 1 else ''))
if days_left > 0:
msg = helper(days_left, 'day')
if hours_left > 0:
msg += ' and ' + helper(hours_left, 'hour')
return msg
if hours_left > 0:
msg = helper(hours_left, 'hour')
if minutes_left > 0:
msg += ' and ' + helper(minutes_left, 'minute')
return msg
if minutes_left > 0:
return helper(minutes_left, 'minute')
return 'less than a minute' | python | def pretty_eta(seconds_left):
"""Print the number of seconds in human readable format.
Examples:
2 days
2 hours and 37 minutes
less than a minute
    Parameters
    ----------
seconds_left: int
Number of seconds to be converted to the ETA
Returns
-------
eta: str
String representing the pretty ETA.
"""
minutes_left = seconds_left // 60
seconds_left %= 60
hours_left = minutes_left // 60
minutes_left %= 60
days_left = hours_left // 24
hours_left %= 24
def helper(cnt, name):
return "{} {}{}".format(str(cnt), name, ('s' if cnt > 1 else ''))
if days_left > 0:
msg = helper(days_left, 'day')
if hours_left > 0:
msg += ' and ' + helper(hours_left, 'hour')
return msg
if hours_left > 0:
msg = helper(hours_left, 'hour')
if minutes_left > 0:
msg += ' and ' + helper(minutes_left, 'minute')
return msg
if minutes_left > 0:
return helper(minutes_left, 'minute')
return 'less than a minute' | [
"def",
"pretty_eta",
"(",
"seconds_left",
")",
":",
"minutes_left",
"=",
"seconds_left",
"//",
"60",
"seconds_left",
"%=",
"60",
"hours_left",
"=",
"minutes_left",
"//",
"60",
"minutes_left",
"%=",
"60",
"days_left",
"=",
"hours_left",
"//",
"24",
"hours_left",
"%=",
"24",
"def",
"helper",
"(",
"cnt",
",",
"name",
")",
":",
"return",
"\"{} {}{}\"",
".",
"format",
"(",
"str",
"(",
"cnt",
")",
",",
"name",
",",
"(",
"'s'",
"if",
"cnt",
">",
"1",
"else",
"''",
")",
")",
"if",
"days_left",
">",
"0",
":",
"msg",
"=",
"helper",
"(",
"days_left",
",",
"'day'",
")",
"if",
"hours_left",
">",
"0",
":",
"msg",
"+=",
"' and '",
"+",
"helper",
"(",
"hours_left",
",",
"'hour'",
")",
"return",
"msg",
"if",
"hours_left",
">",
"0",
":",
"msg",
"=",
"helper",
"(",
"hours_left",
",",
"'hour'",
")",
"if",
"minutes_left",
">",
"0",
":",
"msg",
"+=",
"' and '",
"+",
"helper",
"(",
"minutes_left",
",",
"'minute'",
")",
"return",
"msg",
"if",
"minutes_left",
">",
"0",
":",
"return",
"helper",
"(",
"minutes_left",
",",
"'minute'",
")",
"return",
"'less than a minute'"
] | Print the number of seconds in human readable format.
Examples:
2 days
2 hours and 37 minutes
less than a minute
    Parameters
    ----------
seconds_left: int
Number of seconds to be converted to the ETA
Returns
-------
eta: str
String representing the pretty ETA. | [
"Print",
"the",
"number",
"of",
"seconds",
"in",
"human",
"readable",
"format",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/misc_util.py#L65-L104 | valid |
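A short usage sketch for pretty_eta, assuming the baselines package is importable; the expected outputs in the comments follow directly from the rounding rules above.

from baselines.common.misc_util import pretty_eta

print(pretty_eta(30))                     # less than a minute
print(pretty_eta(2 * 3600 + 37 * 60))     # 2 hours and 37 minutes
print(pretty_eta(2 * 86400))              # 2 days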
openai/baselines | baselines/common/misc_util.py | boolean_flag | def boolean_flag(parser, name, default=False, help=None):
"""Add a boolean flag to argparse parser.
Parameters
----------
parser: argparse.Parser
parser to add the flag to
name: str
--<name> will enable the flag, while --no-<name> will disable it
default: bool or None
default value of the flag
help: str
help string for the flag
"""
dest = name.replace('-', '_')
parser.add_argument("--" + name, action="store_true", default=default, dest=dest, help=help)
parser.add_argument("--no-" + name, action="store_false", dest=dest) | python | def boolean_flag(parser, name, default=False, help=None):
"""Add a boolean flag to argparse parser.
Parameters
----------
parser: argparse.Parser
parser to add the flag to
name: str
--<name> will enable the flag, while --no-<name> will disable it
default: bool or None
default value of the flag
help: str
help string for the flag
"""
dest = name.replace('-', '_')
parser.add_argument("--" + name, action="store_true", default=default, dest=dest, help=help)
parser.add_argument("--no-" + name, action="store_false", dest=dest) | [
"def",
"boolean_flag",
"(",
"parser",
",",
"name",
",",
"default",
"=",
"False",
",",
"help",
"=",
"None",
")",
":",
"dest",
"=",
"name",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"parser",
".",
"add_argument",
"(",
"\"--\"",
"+",
"name",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"default",
",",
"dest",
"=",
"dest",
",",
"help",
"=",
"help",
")",
"parser",
".",
"add_argument",
"(",
"\"--no-\"",
"+",
"name",
",",
"action",
"=",
"\"store_false\"",
",",
"dest",
"=",
"dest",
")"
] | Add a boolean flag to argparse parser.
Parameters
----------
parser: argparse.Parser
parser to add the flag to
name: str
--<name> will enable the flag, while --no-<name> will disable it
default: bool or None
default value of the flag
help: str
help string for the flag | [
"Add",
"a",
"boolean",
"flag",
"to",
"argparse",
"parser",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/misc_util.py#L140-L156 | valid |
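A small argparse sketch showing the paired flags that boolean_flag registers, assuming baselines is importable; the flag name 'render' is just an illustrative choice.

import argparse
from baselines.common.misc_util import boolean_flag

parser = argparse.ArgumentParser()
boolean_flag(parser, 'render', default=False, help='render the environment')

print(parser.parse_args([]).render)               # False (the default)
print(parser.parse_args(['--render']).render)     # True
print(parser.parse_args(['--no-render']).render)  # False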
openai/baselines | baselines/common/misc_util.py | get_wrapper_by_name | def get_wrapper_by_name(env, classname):
    """Given a gym environment possibly wrapped multiple times, returns a wrapper
    of class named classname or raises ValueError if no such wrapper was applied
    Parameters
    ----------
    env: gym.Env or gym.Wrapper
gym environment
classname: str
name of the wrapper
Returns
-------
wrapper: gym.Wrapper
wrapper named classname
"""
currentenv = env
while True:
if classname == currentenv.class_name():
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
            raise ValueError("Couldn't find wrapper named %s" % classname) | python | def get_wrapper_by_name(env, classname):
    """Given a gym environment possibly wrapped multiple times, returns a wrapper
    of class named classname or raises ValueError if no such wrapper was applied
    Parameters
    ----------
    env: gym.Env or gym.Wrapper
gym environment
classname: str
name of the wrapper
Returns
-------
wrapper: gym.Wrapper
wrapper named classname
"""
currentenv = env
while True:
if classname == currentenv.class_name():
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
raise ValueError("Couldn't find wrapper named %s" % classname) | [
"def",
"get_wrapper_by_name",
"(",
"env",
",",
"classname",
")",
":",
"currentenv",
"=",
"env",
"while",
"True",
":",
"if",
"classname",
"==",
"currentenv",
".",
"class_name",
"(",
")",
":",
"return",
"currentenv",
"elif",
"isinstance",
"(",
"currentenv",
",",
"gym",
".",
"Wrapper",
")",
":",
"currentenv",
"=",
"currentenv",
".",
"env",
"else",
":",
"raise",
"ValueError",
"(",
"\"Couldn't find wrapper named %s\"",
"%",
"classname",
")"
] | Given a gym environment possibly wrapped multiple times, returns a wrapper
    of class named classname or raises ValueError if no such wrapper was applied
    Parameters
    ----------
    env: gym.Env or gym.Wrapper
gym environment
classname: str
name of the wrapper
Returns
-------
wrapper: gym.Wrapper
wrapper named classname | [
"Given",
"an",
"a",
"gym",
"environment",
"possibly",
"wrapped",
"multiple",
"times",
"returns",
"a",
"wrapper",
"of",
"class",
"named",
"classname",
"or",
"raises",
"ValueError",
"if",
"no",
"such",
"wrapper",
"was",
"applied"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/misc_util.py#L159-L182 | valid |
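A hedged usage sketch: with a gym version contemporaneous with baselines, gym.make wraps classic-control environments in TimeLimit and wrappers expose the class_name() classmethod this helper relies on, so the call below should return that wrapper; newer gym/gymnasium releases may wrap differently.

import gym
from baselines.common.misc_util import get_wrapper_by_name

env = gym.make('CartPole-v1')                        # wrapped in TimeLimit by gym.make
time_limit = get_wrapper_by_name(env, 'TimeLimit')
print(type(time_limit).__name__)                     # TimeLimit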
openai/baselines | baselines/common/misc_util.py | relatively_safe_pickle_dump | def relatively_safe_pickle_dump(obj, path, compression=False):
"""This is just like regular pickle dump, except from the fact that failure cases are
different:
- It's never possible that we end up with a pickle in corrupted state.
    - If there was a different file at the path, that file will remain unchanged in the
      event of failure (provided that filesystem rename is atomic).
    - it is sometimes possible that we end up with a useless temp file which needs to be
      deleted manually (it will be removed automatically on the next function call)
    The intended use case is periodic checkpoints of experiment state, such that we never
corrupt previous checkpoints if the current one fails.
Parameters
----------
obj: object
object to pickle
path: str
path to the output file
compression: bool
if true pickle will be compressed
"""
temp_storage = path + ".relatively_safe"
if compression:
# Using gzip here would be simpler, but the size is limited to 2GB
with tempfile.NamedTemporaryFile() as uncompressed_file:
pickle.dump(obj, uncompressed_file)
uncompressed_file.file.flush()
with zipfile.ZipFile(temp_storage, "w", compression=zipfile.ZIP_DEFLATED) as myzip:
myzip.write(uncompressed_file.name, "data")
else:
with open(temp_storage, "wb") as f:
pickle.dump(obj, f)
os.rename(temp_storage, path) | python | def relatively_safe_pickle_dump(obj, path, compression=False):
"""This is just like regular pickle dump, except from the fact that failure cases are
different:
- It's never possible that we end up with a pickle in corrupted state.
    - If there was a different file at the path, that file will remain unchanged in the
      event of failure (provided that filesystem rename is atomic).
    - it is sometimes possible that we end up with a useless temp file which needs to be
      deleted manually (it will be removed automatically on the next function call)
    The intended use case is periodic checkpoints of experiment state, such that we never
corrupt previous checkpoints if the current one fails.
Parameters
----------
obj: object
object to pickle
path: str
path to the output file
compression: bool
if true pickle will be compressed
"""
temp_storage = path + ".relatively_safe"
if compression:
# Using gzip here would be simpler, but the size is limited to 2GB
with tempfile.NamedTemporaryFile() as uncompressed_file:
pickle.dump(obj, uncompressed_file)
uncompressed_file.file.flush()
with zipfile.ZipFile(temp_storage, "w", compression=zipfile.ZIP_DEFLATED) as myzip:
myzip.write(uncompressed_file.name, "data")
else:
with open(temp_storage, "wb") as f:
pickle.dump(obj, f)
os.rename(temp_storage, path) | [
"def",
"relatively_safe_pickle_dump",
"(",
"obj",
",",
"path",
",",
"compression",
"=",
"False",
")",
":",
"temp_storage",
"=",
"path",
"+",
"\".relatively_safe\"",
"if",
"compression",
":",
"# Using gzip here would be simpler, but the size is limited to 2GB",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"as",
"uncompressed_file",
":",
"pickle",
".",
"dump",
"(",
"obj",
",",
"uncompressed_file",
")",
"uncompressed_file",
".",
"file",
".",
"flush",
"(",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"temp_storage",
",",
"\"w\"",
",",
"compression",
"=",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"as",
"myzip",
":",
"myzip",
".",
"write",
"(",
"uncompressed_file",
".",
"name",
",",
"\"data\"",
")",
"else",
":",
"with",
"open",
"(",
"temp_storage",
",",
"\"wb\"",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"obj",
",",
"f",
")",
"os",
".",
"rename",
"(",
"temp_storage",
",",
"path",
")"
] | This is just like regular pickle dump, except from the fact that failure cases are
different:
- It's never possible that we end up with a pickle in corrupted state.
    - If there was a different file at the path, that file will remain unchanged in the
      event of failure (provided that filesystem rename is atomic).
    - it is sometimes possible that we end up with a useless temp file which needs to be
      deleted manually (it will be removed automatically on the next function call)
    The intended use case is periodic checkpoints of experiment state, such that we never
corrupt previous checkpoints if the current one fails.
Parameters
----------
obj: object
object to pickle
path: str
path to the output file
compression: bool
if true pickle will be compressed | [
"This",
"is",
"just",
"like",
"regular",
"pickle",
"dump",
"except",
"from",
"the",
"fact",
"that",
"failure",
"cases",
"are",
"different",
":"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/misc_util.py#L185-L218 | valid |
openai/baselines | baselines/common/misc_util.py | pickle_load | def pickle_load(path, compression=False):
"""Unpickle a possible compressed pickle.
Parameters
----------
path: str
path to the output file
compression: bool
if true assumes that pickle was compressed when created and attempts decompression.
Returns
-------
obj: object
the unpickled object
"""
if compression:
with zipfile.ZipFile(path, "r", compression=zipfile.ZIP_DEFLATED) as myzip:
with myzip.open("data") as f:
return pickle.load(f)
else:
with open(path, "rb") as f:
return pickle.load(f) | python | def pickle_load(path, compression=False):
"""Unpickle a possible compressed pickle.
Parameters
----------
path: str
path to the output file
compression: bool
if true assumes that pickle was compressed when created and attempts decompression.
Returns
-------
obj: object
the unpickled object
"""
if compression:
with zipfile.ZipFile(path, "r", compression=zipfile.ZIP_DEFLATED) as myzip:
with myzip.open("data") as f:
return pickle.load(f)
else:
with open(path, "rb") as f:
return pickle.load(f) | [
"def",
"pickle_load",
"(",
"path",
",",
"compression",
"=",
"False",
")",
":",
"if",
"compression",
":",
"with",
"zipfile",
".",
"ZipFile",
"(",
"path",
",",
"\"r\"",
",",
"compression",
"=",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"as",
"myzip",
":",
"with",
"myzip",
".",
"open",
"(",
"\"data\"",
")",
"as",
"f",
":",
"return",
"pickle",
".",
"load",
"(",
"f",
")",
"else",
":",
"with",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"return",
"pickle",
".",
"load",
"(",
"f",
")"
] | Unpickle a possible compressed pickle.
Parameters
----------
path: str
path to the output file
compression: bool
if true assumes that pickle was compressed when created and attempts decompression.
Returns
-------
obj: object
the unpickled object | [
"Unpickle",
"a",
"possible",
"compressed",
"pickle",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/misc_util.py#L221-L243 | valid |
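A round-trip sketch combining relatively_safe_pickle_dump with pickle_load, assuming baselines is importable and a POSIX filesystem; compression=True exercises the zip path, and the final os.rename is what makes the checkpoint write atomic.

import os, tempfile
from baselines.common.misc_util import relatively_safe_pickle_dump, pickle_load

state = {'step': 1000, 'epsilon': 0.1}
path = os.path.join(tempfile.mkdtemp(), 'checkpoint.pkl')

relatively_safe_pickle_dump(state, path, compression=True)
restored = pickle_load(path, compression=True)
print(restored == state)   # True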
openai/baselines | baselines/common/misc_util.py | RunningAvg.update | def update(self, new_val):
"""Update the estimate.
Parameters
----------
new_val: float
            new observed value of the estimated quantity.
"""
if self._value is None:
self._value = new_val
else:
self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val | python | def update(self, new_val):
"""Update the estimate.
Parameters
----------
new_val: float
            new observed value of the estimated quantity.
"""
if self._value is None:
self._value = new_val
else:
self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val | [
"def",
"update",
"(",
"self",
",",
"new_val",
")",
":",
"if",
"self",
".",
"_value",
"is",
"None",
":",
"self",
".",
"_value",
"=",
"new_val",
"else",
":",
"self",
".",
"_value",
"=",
"self",
".",
"_gamma",
"*",
"self",
".",
"_value",
"+",
"(",
"1.0",
"-",
"self",
".",
"_gamma",
")",
"*",
"new_val"
] | Update the estimate.
Parameters
----------
new_val: float
            new observed value of the estimated quantity.
"Update",
"the",
"estimate",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/misc_util.py#L123-L134 | valid |
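The update rule above is an exponential moving average; here is a standalone sketch of the same recurrence (the constructor that sets _gamma and the initial _value is not part of this record, so plain variables stand in for them).

gamma = 0.9
value = None                      # plays the role of self._value before any update

for new_val in [1.0, 2.0, 3.0]:
    if value is None:
        value = new_val
    else:
        value = gamma * value + (1.0 - gamma) * new_val

print(value)                      # about 1.29: 0.9 * (0.9*1.0 + 0.1*2.0) + 0.1*3.0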
openai/baselines | baselines/her/util.py | store_args | def store_args(method):
"""Stores provided method args as instance attributes.
"""
argspec = inspect.getfullargspec(method)
defaults = {}
if argspec.defaults is not None:
defaults = dict(
zip(argspec.args[-len(argspec.defaults):], argspec.defaults))
if argspec.kwonlydefaults is not None:
defaults.update(argspec.kwonlydefaults)
arg_names = argspec.args[1:]
@functools.wraps(method)
def wrapper(*positional_args, **keyword_args):
self = positional_args[0]
# Get default arg values
args = defaults.copy()
# Add provided arg values
for name, value in zip(arg_names, positional_args[1:]):
args[name] = value
args.update(keyword_args)
self.__dict__.update(args)
return method(*positional_args, **keyword_args)
return wrapper | python | def store_args(method):
"""Stores provided method args as instance attributes.
"""
argspec = inspect.getfullargspec(method)
defaults = {}
if argspec.defaults is not None:
defaults = dict(
zip(argspec.args[-len(argspec.defaults):], argspec.defaults))
if argspec.kwonlydefaults is not None:
defaults.update(argspec.kwonlydefaults)
arg_names = argspec.args[1:]
@functools.wraps(method)
def wrapper(*positional_args, **keyword_args):
self = positional_args[0]
# Get default arg values
args = defaults.copy()
# Add provided arg values
for name, value in zip(arg_names, positional_args[1:]):
args[name] = value
args.update(keyword_args)
self.__dict__.update(args)
return method(*positional_args, **keyword_args)
return wrapper | [
"def",
"store_args",
"(",
"method",
")",
":",
"argspec",
"=",
"inspect",
".",
"getfullargspec",
"(",
"method",
")",
"defaults",
"=",
"{",
"}",
"if",
"argspec",
".",
"defaults",
"is",
"not",
"None",
":",
"defaults",
"=",
"dict",
"(",
"zip",
"(",
"argspec",
".",
"args",
"[",
"-",
"len",
"(",
"argspec",
".",
"defaults",
")",
":",
"]",
",",
"argspec",
".",
"defaults",
")",
")",
"if",
"argspec",
".",
"kwonlydefaults",
"is",
"not",
"None",
":",
"defaults",
".",
"update",
"(",
"argspec",
".",
"kwonlydefaults",
")",
"arg_names",
"=",
"argspec",
".",
"args",
"[",
"1",
":",
"]",
"@",
"functools",
".",
"wraps",
"(",
"method",
")",
"def",
"wrapper",
"(",
"*",
"positional_args",
",",
"*",
"*",
"keyword_args",
")",
":",
"self",
"=",
"positional_args",
"[",
"0",
"]",
"# Get default arg values",
"args",
"=",
"defaults",
".",
"copy",
"(",
")",
"# Add provided arg values",
"for",
"name",
",",
"value",
"in",
"zip",
"(",
"arg_names",
",",
"positional_args",
"[",
"1",
":",
"]",
")",
":",
"args",
"[",
"name",
"]",
"=",
"value",
"args",
".",
"update",
"(",
"keyword_args",
")",
"self",
".",
"__dict__",
".",
"update",
"(",
"args",
")",
"return",
"method",
"(",
"*",
"positional_args",
",",
"*",
"*",
"keyword_args",
")",
"return",
"wrapper"
] | Stores provided method args as instance attributes. | [
"Stores",
"provided",
"method",
"args",
"as",
"instance",
"attributes",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/util.py#L14-L38 | valid |
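A decorator usage sketch, assuming baselines (with the TensorFlow dependency that baselines.her.util imports) is available; the Agent class and its hyperparameters are purely illustrative.

from baselines.her.util import store_args

class Agent:
    @store_args
    def __init__(self, lr, gamma=0.99, clip_range=0.2):
        pass   # the decorator has already copied the arguments onto self

agent = Agent(3e-4, clip_range=0.1)
print(agent.lr, agent.gamma, agent.clip_range)   # 0.0003 0.99 0.1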
openai/baselines | baselines/her/util.py | import_function | def import_function(spec):
"""Import a function identified by a string like "pkg.module:fn_name".
"""
mod_name, fn_name = spec.split(':')
module = importlib.import_module(mod_name)
fn = getattr(module, fn_name)
return fn | python | def import_function(spec):
"""Import a function identified by a string like "pkg.module:fn_name".
"""
mod_name, fn_name = spec.split(':')
module = importlib.import_module(mod_name)
fn = getattr(module, fn_name)
return fn | [
"def",
"import_function",
"(",
"spec",
")",
":",
"mod_name",
",",
"fn_name",
"=",
"spec",
".",
"split",
"(",
"':'",
")",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"mod_name",
")",
"fn",
"=",
"getattr",
"(",
"module",
",",
"fn_name",
")",
"return",
"fn"
] | Import a function identified by a string like "pkg.module:fn_name". | [
"Import",
"a",
"function",
"identified",
"by",
"a",
"string",
"like",
"pkg",
".",
"module",
":",
"fn_name",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/util.py#L41-L47 | valid |
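A quick sketch of the "pkg.module:fn_name" spec format, assuming baselines.her.util is importable; os.path:join is used only as a convenient stand-in for a real policy-builder spec.

from baselines.her.util import import_function

join = import_function('os.path:join')
print(join('logs', 'run1'))          # logs/run1 (separator depends on the OS)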
openai/baselines | baselines/her/util.py | flatten_grads | def flatten_grads(var_list, grads):
    """Flattens variables and their gradients.
"""
return tf.concat([tf.reshape(grad, [U.numel(v)])
                      for (v, grad) in zip(var_list, grads)], 0) | python | def flatten_grads(var_list, grads):
    """Flattens variables and their gradients.
"""
return tf.concat([tf.reshape(grad, [U.numel(v)])
for (v, grad) in zip(var_list, grads)], 0) | [
"def",
"flatten_grads",
"(",
"var_list",
",",
"grads",
")",
":",
"return",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"reshape",
"(",
"grad",
",",
"[",
"U",
".",
"numel",
"(",
"v",
")",
"]",
")",
"for",
"(",
"v",
",",
"grad",
")",
"in",
"zip",
"(",
"var_list",
",",
"grads",
")",
"]",
",",
"0",
")"
] | Flattens variables and their gradients. | [
"Flattens",
"a",
"variables",
"and",
"their",
"gradients",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/util.py#L50-L54 | valid |
openai/baselines | baselines/her/util.py | nn | def nn(input, layers_sizes, reuse=None, flatten=False, name=""):
"""Creates a simple neural network
"""
for i, size in enumerate(layers_sizes):
activation = tf.nn.relu if i < len(layers_sizes) - 1 else None
input = tf.layers.dense(inputs=input,
units=size,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
reuse=reuse,
name=name + '_' + str(i))
if activation:
input = activation(input)
if flatten:
assert layers_sizes[-1] == 1
input = tf.reshape(input, [-1])
return input | python | def nn(input, layers_sizes, reuse=None, flatten=False, name=""):
"""Creates a simple neural network
"""
for i, size in enumerate(layers_sizes):
activation = tf.nn.relu if i < len(layers_sizes) - 1 else None
input = tf.layers.dense(inputs=input,
units=size,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
reuse=reuse,
name=name + '_' + str(i))
if activation:
input = activation(input)
if flatten:
assert layers_sizes[-1] == 1
input = tf.reshape(input, [-1])
return input | [
"def",
"nn",
"(",
"input",
",",
"layers_sizes",
",",
"reuse",
"=",
"None",
",",
"flatten",
"=",
"False",
",",
"name",
"=",
"\"\"",
")",
":",
"for",
"i",
",",
"size",
"in",
"enumerate",
"(",
"layers_sizes",
")",
":",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
"if",
"i",
"<",
"len",
"(",
"layers_sizes",
")",
"-",
"1",
"else",
"None",
"input",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"inputs",
"=",
"input",
",",
"units",
"=",
"size",
",",
"kernel_initializer",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"xavier_initializer",
"(",
")",
",",
"reuse",
"=",
"reuse",
",",
"name",
"=",
"name",
"+",
"'_'",
"+",
"str",
"(",
"i",
")",
")",
"if",
"activation",
":",
"input",
"=",
"activation",
"(",
"input",
")",
"if",
"flatten",
":",
"assert",
"layers_sizes",
"[",
"-",
"1",
"]",
"==",
"1",
"input",
"=",
"tf",
".",
"reshape",
"(",
"input",
",",
"[",
"-",
"1",
"]",
")",
"return",
"input"
] | Creates a simple neural network | [
"Creates",
"a",
"simple",
"neural",
"network"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/util.py#L57-L72 | valid |
openai/baselines | baselines/her/util.py | mpi_fork | def mpi_fork(n, extra_mpi_args=[]):
"""Re-launches the current script with workers
Returns "parent" for original parent, "child" for MPI children
"""
if n <= 1:
return "child"
if os.getenv("IN_MPI") is None:
env = os.environ.copy()
env.update(
MKL_NUM_THREADS="1",
OMP_NUM_THREADS="1",
IN_MPI="1"
)
# "-bind-to core" is crucial for good performance
args = ["mpirun", "-np", str(n)] + \
extra_mpi_args + \
[sys.executable]
args += sys.argv
subprocess.check_call(args, env=env)
return "parent"
else:
install_mpi_excepthook()
return "child" | python | def mpi_fork(n, extra_mpi_args=[]):
"""Re-launches the current script with workers
Returns "parent" for original parent, "child" for MPI children
"""
if n <= 1:
return "child"
if os.getenv("IN_MPI") is None:
env = os.environ.copy()
env.update(
MKL_NUM_THREADS="1",
OMP_NUM_THREADS="1",
IN_MPI="1"
)
# "-bind-to core" is crucial for good performance
args = ["mpirun", "-np", str(n)] + \
extra_mpi_args + \
[sys.executable]
args += sys.argv
subprocess.check_call(args, env=env)
return "parent"
else:
install_mpi_excepthook()
return "child" | [
"def",
"mpi_fork",
"(",
"n",
",",
"extra_mpi_args",
"=",
"[",
"]",
")",
":",
"if",
"n",
"<=",
"1",
":",
"return",
"\"child\"",
"if",
"os",
".",
"getenv",
"(",
"\"IN_MPI\"",
")",
"is",
"None",
":",
"env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"env",
".",
"update",
"(",
"MKL_NUM_THREADS",
"=",
"\"1\"",
",",
"OMP_NUM_THREADS",
"=",
"\"1\"",
",",
"IN_MPI",
"=",
"\"1\"",
")",
"# \"-bind-to core\" is crucial for good performance",
"args",
"=",
"[",
"\"mpirun\"",
",",
"\"-np\"",
",",
"str",
"(",
"n",
")",
"]",
"+",
"extra_mpi_args",
"+",
"[",
"sys",
".",
"executable",
"]",
"args",
"+=",
"sys",
".",
"argv",
"subprocess",
".",
"check_call",
"(",
"args",
",",
"env",
"=",
"env",
")",
"return",
"\"parent\"",
"else",
":",
"install_mpi_excepthook",
"(",
")",
"return",
"\"child\""
] | Re-launches the current script with workers
Returns "parent" for original parent, "child" for MPI children | [
"Re",
"-",
"launches",
"the",
"current",
"script",
"with",
"workers",
"Returns",
"parent",
"for",
"original",
"parent",
"child",
"for",
"MPI",
"children"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/util.py#L88-L111 | valid |
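A sketch of the typical call pattern around mpi_fork, assuming mpirun is on PATH; the parent process only relaunches the script and exits, while every MPI child falls through to the training code. With n <= 1 the call is a no-op that returns 'child'.

import sys
from baselines.her.util import mpi_fork

whoami = mpi_fork(4)                 # relaunch this script under mpirun with 4 workers
if whoami == 'parent':
    sys.exit(0)
# ... MPI children continue here with the actual work ...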
openai/baselines | baselines/her/util.py | convert_episode_to_batch_major | def convert_episode_to_batch_major(episode):
"""Converts an episode to have the batch dimension in the major (first)
dimension.
"""
episode_batch = {}
for key in episode.keys():
val = np.array(episode[key]).copy()
# make inputs batch-major instead of time-major
episode_batch[key] = val.swapaxes(0, 1)
return episode_batch | python | def convert_episode_to_batch_major(episode):
"""Converts an episode to have the batch dimension in the major (first)
dimension.
"""
episode_batch = {}
for key in episode.keys():
val = np.array(episode[key]).copy()
# make inputs batch-major instead of time-major
episode_batch[key] = val.swapaxes(0, 1)
return episode_batch | [
"def",
"convert_episode_to_batch_major",
"(",
"episode",
")",
":",
"episode_batch",
"=",
"{",
"}",
"for",
"key",
"in",
"episode",
".",
"keys",
"(",
")",
":",
"val",
"=",
"np",
".",
"array",
"(",
"episode",
"[",
"key",
"]",
")",
".",
"copy",
"(",
")",
"# make inputs batch-major instead of time-major",
"episode_batch",
"[",
"key",
"]",
"=",
"val",
".",
"swapaxes",
"(",
"0",
",",
"1",
")",
"return",
"episode_batch"
] | Converts an episode to have the batch dimension in the major (first)
dimension. | [
"Converts",
"an",
"episode",
"to",
"have",
"the",
"batch",
"dimension",
"in",
"the",
"major",
"(",
"first",
")",
"dimension",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/util.py#L114-L124 | valid |
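A standalone numpy sketch of the same time-major to batch-major swap (importing baselines.her.util would pull in TensorFlow, so the loop is repeated inline); the shapes are illustrative.

import numpy as np

# Time-major rollout: T=3 timesteps, 2 parallel rollouts, 4-dim observations
episode = {'o': [np.zeros((2, 4)) for _ in range(3)]}

episode_batch = {}
for key in episode.keys():
    val = np.array(episode[key]).copy()      # (3, 2, 4), time-major
    episode_batch[key] = val.swapaxes(0, 1)  # (2, 3, 4), batch-major

print(episode_batch['o'].shape)              # (2, 3, 4)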
openai/baselines | baselines/her/util.py | reshape_for_broadcasting | def reshape_for_broadcasting(source, target):
"""Reshapes a tensor (source) to have the correct shape and dtype of the target
before broadcasting it with MPI.
"""
dim = len(target.get_shape())
shape = ([1] * (dim - 1)) + [-1]
return tf.reshape(tf.cast(source, target.dtype), shape) | python | def reshape_for_broadcasting(source, target):
"""Reshapes a tensor (source) to have the correct shape and dtype of the target
before broadcasting it with MPI.
"""
dim = len(target.get_shape())
shape = ([1] * (dim - 1)) + [-1]
return tf.reshape(tf.cast(source, target.dtype), shape) | [
"def",
"reshape_for_broadcasting",
"(",
"source",
",",
"target",
")",
":",
"dim",
"=",
"len",
"(",
"target",
".",
"get_shape",
"(",
")",
")",
"shape",
"=",
"(",
"[",
"1",
"]",
"*",
"(",
"dim",
"-",
"1",
")",
")",
"+",
"[",
"-",
"1",
"]",
"return",
"tf",
".",
"reshape",
"(",
"tf",
".",
"cast",
"(",
"source",
",",
"target",
".",
"dtype",
")",
",",
"shape",
")"
] | Reshapes a tensor (source) to have the correct shape and dtype of the target
before broadcasting it with MPI. | [
"Reshapes",
"a",
"tensor",
"(",
"source",
")",
"to",
"have",
"the",
"correct",
"shape",
"and",
"dtype",
"of",
"the",
"target",
"before",
"broadcasting",
"it",
"with",
"MPI",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/util.py#L134-L140 | valid |
openai/baselines | baselines/ppo1/pposgd_simple.py | add_vtarg_and_adv | def add_vtarg_and_adv(seg, gamma, lam):
"""
Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
"""
new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred = np.append(seg["vpred"], seg["nextvpred"])
T = len(seg["rew"])
seg["adv"] = gaelam = np.empty(T, 'float32')
rew = seg["rew"]
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-new[t+1]
delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
seg["tdlamret"] = seg["adv"] + seg["vpred"] | python | def add_vtarg_and_adv(seg, gamma, lam):
"""
Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
"""
new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred = np.append(seg["vpred"], seg["nextvpred"])
T = len(seg["rew"])
seg["adv"] = gaelam = np.empty(T, 'float32')
rew = seg["rew"]
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-new[t+1]
delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
seg["tdlamret"] = seg["adv"] + seg["vpred"] | [
"def",
"add_vtarg_and_adv",
"(",
"seg",
",",
"gamma",
",",
"lam",
")",
":",
"new",
"=",
"np",
".",
"append",
"(",
"seg",
"[",
"\"new\"",
"]",
",",
"0",
")",
"# last element is only used for last vtarg, but we already zeroed it if last new = 1",
"vpred",
"=",
"np",
".",
"append",
"(",
"seg",
"[",
"\"vpred\"",
"]",
",",
"seg",
"[",
"\"nextvpred\"",
"]",
")",
"T",
"=",
"len",
"(",
"seg",
"[",
"\"rew\"",
"]",
")",
"seg",
"[",
"\"adv\"",
"]",
"=",
"gaelam",
"=",
"np",
".",
"empty",
"(",
"T",
",",
"'float32'",
")",
"rew",
"=",
"seg",
"[",
"\"rew\"",
"]",
"lastgaelam",
"=",
"0",
"for",
"t",
"in",
"reversed",
"(",
"range",
"(",
"T",
")",
")",
":",
"nonterminal",
"=",
"1",
"-",
"new",
"[",
"t",
"+",
"1",
"]",
"delta",
"=",
"rew",
"[",
"t",
"]",
"+",
"gamma",
"*",
"vpred",
"[",
"t",
"+",
"1",
"]",
"*",
"nonterminal",
"-",
"vpred",
"[",
"t",
"]",
"gaelam",
"[",
"t",
"]",
"=",
"lastgaelam",
"=",
"delta",
"+",
"gamma",
"*",
"lam",
"*",
"nonterminal",
"*",
"lastgaelam",
"seg",
"[",
"\"tdlamret\"",
"]",
"=",
"seg",
"[",
"\"adv\"",
"]",
"+",
"seg",
"[",
"\"vpred\"",
"]"
] | Compute target value using TD(lambda) estimator, and advantage with GAE(lambda) | [
"Compute",
"target",
"value",
"using",
"TD",
"(",
"lambda",
")",
"estimator",
"and",
"advantage",
"with",
"GAE",
"(",
"lambda",
")"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/ppo1/pposgd_simple.py#L64-L78 | valid |
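A standalone numpy walk-through of the same GAE(lambda) recursion on a tiny 3-step segment; the field names mirror the ones the function expects, and the values are made up for illustration.

import numpy as np

gamma, lam = 0.99, 0.95
seg = {
    'rew':       np.array([1.0, 1.0, 1.0], dtype=np.float32),
    'vpred':     np.array([0.5, 0.6, 0.7], dtype=np.float32),
    'new':       np.array([0, 0, 0]),   # no episode boundary inside the segment
    'nextvpred': 0.8,                   # bootstrap value for the unfinished episode
}

new = np.append(seg['new'], 0)
vpred = np.append(seg['vpred'], seg['nextvpred'])
T = len(seg['rew'])
adv = np.empty(T, 'float32')
lastgaelam = 0.0
for t in reversed(range(T)):
    nonterminal = 1 - new[t + 1]
    delta = seg['rew'][t] + gamma * vpred[t + 1] * nonterminal - vpred[t]
    adv[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
seg['adv'] = adv
seg['tdlamret'] = seg['adv'] + seg['vpred']   # TD(lambda) return targets for the value function
print(seg['adv'], seg['tdlamret'])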
openai/baselines | baselines/common/tf_util.py | switch | def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x | python | def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x | [
"def",
"switch",
"(",
"condition",
",",
"then_expression",
",",
"else_expression",
")",
":",
"x_shape",
"=",
"copy",
".",
"copy",
"(",
"then_expression",
".",
"get_shape",
"(",
")",
")",
"x",
"=",
"tf",
".",
"cond",
"(",
"tf",
".",
"cast",
"(",
"condition",
",",
"'bool'",
")",
",",
"lambda",
":",
"then_expression",
",",
"lambda",
":",
"else_expression",
")",
"x",
".",
"set_shape",
"(",
"x_shape",
")",
"return",
"x"
] | Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation. | [
"Switches",
"between",
"two",
"operations",
"depending",
"on",
"a",
"scalar",
"value",
"(",
"int",
"or",
"bool",
")",
".",
"Note",
"that",
"both",
"then_expression",
"and",
"else_expression",
"should",
"be",
"symbolic",
"tensors",
"of",
"the",
"*",
"same",
"shape",
"*",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/tf_util.py#L9-L24 | valid |
openai/baselines | baselines/common/tf_util.py | huber_loss | def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
) | python | def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
) | [
"def",
"huber_loss",
"(",
"x",
",",
"delta",
"=",
"1.0",
")",
":",
"return",
"tf",
".",
"where",
"(",
"tf",
".",
"abs",
"(",
"x",
")",
"<",
"delta",
",",
"tf",
".",
"square",
"(",
"x",
")",
"*",
"0.5",
",",
"delta",
"*",
"(",
"tf",
".",
"abs",
"(",
"x",
")",
"-",
"0.5",
"*",
"delta",
")",
")"
] | Reference: https://en.wikipedia.org/wiki/Huber_loss | [
"Reference",
":",
"https",
":",
"//",
"en",
".",
"wikipedia",
".",
"org",
"/",
"wiki",
"/",
"Huber_loss"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/tf_util.py#L39-L45 | valid |
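A numpy sketch of the same piecewise rule for reference (quadratic inside |x| < delta, linear outside), which is what makes the loss less sensitive to outliers than a pure squared error.

import numpy as np

def huber_loss_np(x, delta=1.0):
    # quadratic for |x| < delta, linear beyond it
    return np.where(np.abs(x) < delta,
                    0.5 * np.square(x),
                    delta * (np.abs(x) - 0.5 * delta))

x = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
print(huber_loss_np(x))   # [2.5, 0.125, 0.0, 0.125, 2.5]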
openai/baselines | baselines/common/tf_util.py | get_session | def get_session(config=None):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(config=config, make_default=True)
return sess | python | def get_session(config=None):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(config=config, make_default=True)
return sess | [
"def",
"get_session",
"(",
"config",
"=",
"None",
")",
":",
"sess",
"=",
"tf",
".",
"get_default_session",
"(",
")",
"if",
"sess",
"is",
"None",
":",
"sess",
"=",
"make_session",
"(",
"config",
"=",
"config",
",",
"make_default",
"=",
"True",
")",
"return",
"sess"
] | Get default session or create one with a given config | [
"Get",
"default",
"session",
"or",
"create",
"one",
"with",
"a",
"given",
"config"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/tf_util.py#L51-L56 | valid |
openai/baselines | baselines/common/tf_util.py | make_session | def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if config is None:
config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph) | python | def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if config is None:
config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph) | [
"def",
"make_session",
"(",
"config",
"=",
"None",
",",
"num_cpu",
"=",
"None",
",",
"make_default",
"=",
"False",
",",
"graph",
"=",
"None",
")",
":",
"if",
"num_cpu",
"is",
"None",
":",
"num_cpu",
"=",
"int",
"(",
"os",
".",
"getenv",
"(",
"'RCALL_NUM_CPU'",
",",
"multiprocessing",
".",
"cpu_count",
"(",
")",
")",
")",
"if",
"config",
"is",
"None",
":",
"config",
"=",
"tf",
".",
"ConfigProto",
"(",
"allow_soft_placement",
"=",
"True",
",",
"inter_op_parallelism_threads",
"=",
"num_cpu",
",",
"intra_op_parallelism_threads",
"=",
"num_cpu",
")",
"config",
".",
"gpu_options",
".",
"allow_growth",
"=",
"True",
"if",
"make_default",
":",
"return",
"tf",
".",
"InteractiveSession",
"(",
"config",
"=",
"config",
",",
"graph",
"=",
"graph",
")",
"else",
":",
"return",
"tf",
".",
"Session",
"(",
"config",
"=",
"config",
",",
"graph",
"=",
"graph",
")"
] | Returns a session that will use <num_cpu> CPU's only | [
"Returns",
"a",
"session",
"that",
"will",
"use",
"<num_cpu",
">",
"CPU",
"s",
"only"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/tf_util.py#L58-L72 | valid |
openai/baselines | baselines/common/tf_util.py | initialize | def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
get_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables) | python | def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
get_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables) | [
"def",
"initialize",
"(",
")",
":",
"new_variables",
"=",
"set",
"(",
"tf",
".",
"global_variables",
"(",
")",
")",
"-",
"ALREADY_INITIALIZED",
"get_session",
"(",
")",
".",
"run",
"(",
"tf",
".",
"variables_initializer",
"(",
"new_variables",
")",
")",
"ALREADY_INITIALIZED",
".",
"update",
"(",
"new_variables",
")"
] | Initialize all the uninitialized variables in the global scope. | [
"Initialize",
"all",
"the",
"uninitialized",
"variables",
"in",
"the",
"global",
"scope",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/tf_util.py#L87-L91 | valid |
openai/baselines | baselines/common/tf_util.py | function | def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
updates: [tf.Operation] or tf.Operation
list of update functions or single update function that will be run whenever
the function is called. The return is ignored.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0] | python | def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
updates: [tf.Operation] or tf.Operation
list of update functions or single update function that will be run whenever
the function is called. The return is ignored.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0] | [
"def",
"function",
"(",
"inputs",
",",
"outputs",
",",
"updates",
"=",
"None",
",",
"givens",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"outputs",
",",
"list",
")",
":",
"return",
"_Function",
"(",
"inputs",
",",
"outputs",
",",
"updates",
",",
"givens",
"=",
"givens",
")",
"elif",
"isinstance",
"(",
"outputs",
",",
"(",
"dict",
",",
"collections",
".",
"OrderedDict",
")",
")",
":",
"f",
"=",
"_Function",
"(",
"inputs",
",",
"outputs",
".",
"values",
"(",
")",
",",
"updates",
",",
"givens",
"=",
"givens",
")",
"return",
"lambda",
"*",
"args",
",",
"*",
"*",
"kwargs",
":",
"type",
"(",
"outputs",
")",
"(",
"zip",
"(",
"outputs",
".",
"keys",
"(",
")",
",",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
")",
"else",
":",
"f",
"=",
"_Function",
"(",
"inputs",
",",
"[",
"outputs",
"]",
",",
"updates",
",",
"givens",
"=",
"givens",
")",
"return",
"lambda",
"*",
"args",
",",
"*",
"*",
"kwargs",
":",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"[",
"0",
"]"
] | Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
updates: [tf.Operation] or tf.Operation
list of update functions or single update function that will be run whenever
the function is called. The return is ignored. | [
"Just",
"like",
"Theano",
"function",
".",
"Take",
"a",
"bunch",
"of",
"tensorflow",
"placeholders",
"and",
"expressions",
"computed",
"based",
"on",
"those",
"placeholders",
"and",
"produces",
"f",
"(",
"inputs",
")",
"-",
">",
"outputs",
".",
"Function",
"f",
"takes",
"values",
"to",
"be",
"fed",
"to",
"the",
"input",
"s",
"placeholders",
"and",
"produces",
"the",
"values",
"of",
"the",
"expressions",
"in",
"outputs",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/tf_util.py#L137-L179 | valid |
openai/baselines | baselines/common/tf_util.py | adjust_shape | def adjust_shape(placeholder, data):
'''
adjust shape of the data to the shape of the placeholder if possible.
If shape is incompatible, AssertionError is thrown
Parameters:
placeholder tensorflow input placeholder
data input data to be (potentially) reshaped to be fed into placeholder
Returns:
reshaped data
'''
if not isinstance(data, np.ndarray) and not isinstance(data, list):
return data
if isinstance(data, list):
data = np.array(data)
placeholder_shape = [x or -1 for x in placeholder.shape.as_list()]
assert _check_shape(placeholder_shape, data.shape), \
'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)
return np.reshape(data, placeholder_shape) | python | def adjust_shape(placeholder, data):
'''
adjust shape of the data to the shape of the placeholder if possible.
If shape is incompatible, AssertionError is thrown
Parameters:
placeholder tensorflow input placeholder
data input data to be (potentially) reshaped to be fed into placeholder
Returns:
reshaped data
'''
if not isinstance(data, np.ndarray) and not isinstance(data, list):
return data
if isinstance(data, list):
data = np.array(data)
placeholder_shape = [x or -1 for x in placeholder.shape.as_list()]
assert _check_shape(placeholder_shape, data.shape), \
'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)
return np.reshape(data, placeholder_shape) | [
"def",
"adjust_shape",
"(",
"placeholder",
",",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
"and",
"not",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"return",
"data",
"if",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"data",
"=",
"np",
".",
"array",
"(",
"data",
")",
"placeholder_shape",
"=",
"[",
"x",
"or",
"-",
"1",
"for",
"x",
"in",
"placeholder",
".",
"shape",
".",
"as_list",
"(",
")",
"]",
"assert",
"_check_shape",
"(",
"placeholder_shape",
",",
"data",
".",
"shape",
")",
",",
"'Shape of data {} is not compatible with shape of the placeholder {}'",
".",
"format",
"(",
"data",
".",
"shape",
",",
"placeholder_shape",
")",
"return",
"np",
".",
"reshape",
"(",
"data",
",",
"placeholder_shape",
")"
] | adjust shape of the data to the shape of the placeholder if possible.
If shape is incompatible, AssertionError is thrown
Parameters:
placeholder tensorflow input placeholder
data input data to be (potentially) reshaped to be fed into placeholder
Returns:
reshaped data | [
"adjust",
"shape",
"of",
"the",
"data",
"to",
"the",
"shape",
"of",
"the",
"placeholder",
"if",
"possible",
".",
"If",
"shape",
"is",
"incompatible",
"AssertionError",
"is",
"thrown"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/tf_util.py#L377-L401 | valid |
openai/baselines | baselines/common/tf_util.py | _check_shape | def _check_shape(placeholder_shape, data_shape):
''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'''
return True
squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)
squeezed_data_shape = _squeeze_shape(data_shape)
for i, s_data in enumerate(squeezed_data_shape):
s_placeholder = squeezed_placeholder_shape[i]
if s_placeholder != -1 and s_data != s_placeholder:
return False
return True | python | def _check_shape(placeholder_shape, data_shape):
''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'''
return True
squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)
squeezed_data_shape = _squeeze_shape(data_shape)
for i, s_data in enumerate(squeezed_data_shape):
s_placeholder = squeezed_placeholder_shape[i]
if s_placeholder != -1 and s_data != s_placeholder:
return False
return True | [
"def",
"_check_shape",
"(",
"placeholder_shape",
",",
"data_shape",
")",
":",
"return",
"True",
"squeezed_placeholder_shape",
"=",
"_squeeze_shape",
"(",
"placeholder_shape",
")",
"squeezed_data_shape",
"=",
"_squeeze_shape",
"(",
"data_shape",
")",
"for",
"i",
",",
"s_data",
"in",
"enumerate",
"(",
"squeezed_data_shape",
")",
":",
"s_placeholder",
"=",
"squeezed_placeholder_shape",
"[",
"i",
"]",
"if",
"s_placeholder",
"!=",
"-",
"1",
"and",
"s_data",
"!=",
"s_placeholder",
":",
"return",
"False",
"return",
"True"
] | check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension) | [
"check",
"if",
"two",
"shapes",
"are",
"compatible",
"(",
"i",
".",
"e",
".",
"differ",
"only",
"by",
"dimensions",
"of",
"size",
"1",
"or",
"by",
"the",
"batch",
"dimension",
")"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/tf_util.py#L404-L416 | valid |
openai/baselines | baselines/logger.py | profile | def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name | python | def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name | [
"def",
"profile",
"(",
"n",
")",
":",
"def",
"decorator_with_name",
"(",
"func",
")",
":",
"def",
"func_wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"profile_kv",
"(",
"n",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"func_wrapper",
"return",
"decorator_with_name"
] | Usage:
@profile("my_func")
def my_func(): code | [
"Usage",
":"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/logger.py#L272-L283 | valid |
openai/baselines | baselines/common/atari_wrappers.py | wrap_deepmind | def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env | python | def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env | [
"def",
"wrap_deepmind",
"(",
"env",
",",
"episode_life",
"=",
"True",
",",
"clip_rewards",
"=",
"True",
",",
"frame_stack",
"=",
"False",
",",
"scale",
"=",
"False",
")",
":",
"if",
"episode_life",
":",
"env",
"=",
"EpisodicLifeEnv",
"(",
"env",
")",
"if",
"'FIRE'",
"in",
"env",
".",
"unwrapped",
".",
"get_action_meanings",
"(",
")",
":",
"env",
"=",
"FireResetEnv",
"(",
"env",
")",
"env",
"=",
"WarpFrame",
"(",
"env",
")",
"if",
"scale",
":",
"env",
"=",
"ScaledFloatFrame",
"(",
"env",
")",
"if",
"clip_rewards",
":",
"env",
"=",
"ClipRewardEnv",
"(",
"env",
")",
"if",
"frame_stack",
":",
"env",
"=",
"FrameStack",
"(",
"env",
",",
"4",
")",
"return",
"env"
] | Configure environment for DeepMind-style Atari. | [
"Configure",
"environment",
"for",
"DeepMind",
"-",
"style",
"Atari",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/atari_wrappers.py#L235-L249 | valid |
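
A minimal usage sketch for wrap_deepmind above, assuming gym with the Atari extras is installed; make_atari is the companion helper in the same module that applies the noop-reset and frame-skip wrappers first.

import numpy as np
from baselines.common.atari_wrappers import make_atari, wrap_deepmind

env = make_atari('BreakoutNoFrameskip-v4')    # raw Atari env plus noop/skip wrappers
env = wrap_deepmind(env, frame_stack=True)    # episodic life, FIRE reset, 84x84 frames, reward clipping

obs = env.reset()
print(np.array(obs).shape)                    # (84, 84, 4): four warped frames stacked on the last axis
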
openai/baselines | baselines/common/atari_wrappers.py | EpisodicLifeEnv.reset | def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs | python | def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs | [
"def",
"reset",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"was_real_done",
":",
"obs",
"=",
"self",
".",
"env",
".",
"reset",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"# no-op step to advance from terminal/lost life state",
"obs",
",",
"_",
",",
"_",
",",
"_",
"=",
"self",
".",
"env",
".",
"step",
"(",
"0",
")",
"self",
".",
"lives",
"=",
"self",
".",
"env",
".",
"unwrapped",
".",
"ale",
".",
"lives",
"(",
")",
"return",
"obs"
] | Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes. | [
"Reset",
"only",
"when",
"lives",
"are",
"exhausted",
".",
"This",
"way",
"all",
"states",
"are",
"still",
"reachable",
"even",
"though",
"lives",
"are",
"episodic",
"and",
"the",
"learner",
"need",
"not",
"know",
"about",
"any",
"of",
"this",
"behind",
"-",
"the",
"-",
"scenes",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/atari_wrappers.py#L84-L95 | valid |
openai/baselines | baselines/common/mpi_util.py | sync_from_root | def sync_from_root(sess, variables, comm=None):
"""
Send the root node's parameters to every worker.
Arguments:
sess: the TensorFlow session.
variables: all parameter variables including optimizer's
"""
if comm is None: comm = MPI.COMM_WORLD
import tensorflow as tf
values = comm.bcast(sess.run(variables))
sess.run([tf.assign(var, val)
for (var, val) in zip(variables, values)]) | python | def sync_from_root(sess, variables, comm=None):
"""
Send the root node's parameters to every worker.
Arguments:
sess: the TensorFlow session.
variables: all parameter variables including optimizer's
"""
if comm is None: comm = MPI.COMM_WORLD
import tensorflow as tf
values = comm.bcast(sess.run(variables))
sess.run([tf.assign(var, val)
for (var, val) in zip(variables, values)]) | [
"def",
"sync_from_root",
"(",
"sess",
",",
"variables",
",",
"comm",
"=",
"None",
")",
":",
"if",
"comm",
"is",
"None",
":",
"comm",
"=",
"MPI",
".",
"COMM_WORLD",
"import",
"tensorflow",
"as",
"tf",
"values",
"=",
"comm",
".",
"bcast",
"(",
"sess",
".",
"run",
"(",
"variables",
")",
")",
"sess",
".",
"run",
"(",
"[",
"tf",
".",
"assign",
"(",
"var",
",",
"val",
")",
"for",
"(",
"var",
",",
"val",
")",
"in",
"zip",
"(",
"variables",
",",
"values",
")",
"]",
")"
] | Send the root node's parameters to every worker.
Arguments:
sess: the TensorFlow session.
variables: all parameter variables including optimizer's | [
"Send",
"the",
"root",
"node",
"s",
"parameters",
"to",
"every",
"worker",
".",
"Arguments",
":",
"sess",
":",
"the",
"TensorFlow",
"session",
".",
"variables",
":",
"all",
"parameter",
"variables",
"including",
"optimizer",
"s"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/mpi_util.py#L15-L26 | valid |
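
The same broadcast pattern as sync_from_root, sketched without TensorFlow using mpi4py and NumPy arrays: rank 0's parameter values overwrite every other worker's copy. Launch under mpirun; the script name and array shapes are illustrative.

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
params = [np.random.randn(3, 2), np.random.randn(5)]   # each rank starts with different values

values = comm.bcast([p.copy() for p in params])        # pickle-based broadcast from rank 0
for p, v in zip(params, values):
    p[...] = v                                         # in-place assignment, mirroring tf.assign above

print(comm.Get_rank(), float(params[0].sum()))         # identical sum printed on every rank
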
openai/baselines | baselines/common/mpi_util.py | gpu_count | def gpu_count():
"""
Count the GPUs on this machine.
"""
if shutil.which('nvidia-smi') is None:
return 0
output = subprocess.check_output(['nvidia-smi', '--query-gpu=gpu_name', '--format=csv'])
return max(0, len(output.split(b'\n')) - 2) | python | def gpu_count():
"""
Count the GPUs on this machine.
"""
if shutil.which('nvidia-smi') is None:
return 0
output = subprocess.check_output(['nvidia-smi', '--query-gpu=gpu_name', '--format=csv'])
return max(0, len(output.split(b'\n')) - 2) | [
"def",
"gpu_count",
"(",
")",
":",
"if",
"shutil",
".",
"which",
"(",
"'nvidia-smi'",
")",
"is",
"None",
":",
"return",
"0",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'nvidia-smi'",
",",
"'--query-gpu=gpu_name'",
",",
"'--format=csv'",
"]",
")",
"return",
"max",
"(",
"0",
",",
"len",
"(",
"output",
".",
"split",
"(",
"b'\\n'",
")",
")",
"-",
"2",
")"
] | Count the GPUs on this machine. | [
"Count",
"the",
"GPUs",
"on",
"this",
"machine",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/mpi_util.py#L28-L35 | valid |
openai/baselines | baselines/common/mpi_util.py | setup_mpi_gpus | def setup_mpi_gpus():
"""
Set CUDA_VISIBLE_DEVICES to MPI rank if not already set
"""
if 'CUDA_VISIBLE_DEVICES' not in os.environ:
if sys.platform == 'darwin': # This Assumes if you're on OSX you're just
ids = [] # doing a smoke test and don't want GPUs
else:
lrank, _lsize = get_local_rank_size(MPI.COMM_WORLD)
ids = [lrank]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, ids)) | python | def setup_mpi_gpus():
"""
Set CUDA_VISIBLE_DEVICES to MPI rank if not already set
"""
if 'CUDA_VISIBLE_DEVICES' not in os.environ:
if sys.platform == 'darwin': # This Assumes if you're on OSX you're just
ids = [] # doing a smoke test and don't want GPUs
else:
lrank, _lsize = get_local_rank_size(MPI.COMM_WORLD)
ids = [lrank]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, ids)) | [
"def",
"setup_mpi_gpus",
"(",
")",
":",
"if",
"'CUDA_VISIBLE_DEVICES'",
"not",
"in",
"os",
".",
"environ",
":",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"# This Assumes if you're on OSX you're just",
"ids",
"=",
"[",
"]",
"# doing a smoke test and don't want GPUs",
"else",
":",
"lrank",
",",
"_lsize",
"=",
"get_local_rank_size",
"(",
"MPI",
".",
"COMM_WORLD",
")",
"ids",
"=",
"[",
"lrank",
"]",
"os",
".",
"environ",
"[",
"\"CUDA_VISIBLE_DEVICES\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"ids",
")",
")"
] | Set CUDA_VISIBLE_DEVICES to MPI rank if not already set | [
"Set",
"CUDA_VISIBLE_DEVICES",
"to",
"MPI",
"rank",
"if",
"not",
"already",
"set"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/mpi_util.py#L37-L47 | valid |
openai/baselines | baselines/common/mpi_util.py | get_local_rank_size | def get_local_rank_size(comm):
"""
Returns the rank of each process on its machine
The processes on a given machine will be assigned ranks
0, 1, 2, ..., N-1,
where N is the number of processes on this machine.
Useful if you want to assign one gpu per machine
"""
this_node = platform.node()
ranks_nodes = comm.allgather((comm.Get_rank(), this_node))
node2rankssofar = defaultdict(int)
local_rank = None
for (rank, node) in ranks_nodes:
if rank == comm.Get_rank():
local_rank = node2rankssofar[node]
node2rankssofar[node] += 1
assert local_rank is not None
return local_rank, node2rankssofar[this_node] | python | def get_local_rank_size(comm):
"""
Returns the rank of each process on its machine
The processes on a given machine will be assigned ranks
0, 1, 2, ..., N-1,
where N is the number of processes on this machine.
Useful if you want to assign one gpu per machine
"""
this_node = platform.node()
ranks_nodes = comm.allgather((comm.Get_rank(), this_node))
node2rankssofar = defaultdict(int)
local_rank = None
for (rank, node) in ranks_nodes:
if rank == comm.Get_rank():
local_rank = node2rankssofar[node]
node2rankssofar[node] += 1
assert local_rank is not None
return local_rank, node2rankssofar[this_node] | [
"def",
"get_local_rank_size",
"(",
"comm",
")",
":",
"this_node",
"=",
"platform",
".",
"node",
"(",
")",
"ranks_nodes",
"=",
"comm",
".",
"allgather",
"(",
"(",
"comm",
".",
"Get_rank",
"(",
")",
",",
"this_node",
")",
")",
"node2rankssofar",
"=",
"defaultdict",
"(",
"int",
")",
"local_rank",
"=",
"None",
"for",
"(",
"rank",
",",
"node",
")",
"in",
"ranks_nodes",
":",
"if",
"rank",
"==",
"comm",
".",
"Get_rank",
"(",
")",
":",
"local_rank",
"=",
"node2rankssofar",
"[",
"node",
"]",
"node2rankssofar",
"[",
"node",
"]",
"+=",
"1",
"assert",
"local_rank",
"is",
"not",
"None",
"return",
"local_rank",
",",
"node2rankssofar",
"[",
"this_node",
"]"
] | Returns the rank of each process on its machine
The processes on a given machine will be assigned ranks
0, 1, 2, ..., N-1,
where N is the number of processes on this machine.
Useful if you want to assign one gpu per machine | [
"Returns",
"the",
"rank",
"of",
"each",
"process",
"on",
"its",
"machine",
"The",
"processes",
"on",
"a",
"given",
"machine",
"will",
"be",
"assigned",
"ranks",
"0",
"1",
"2",
"...",
"N",
"-",
"1",
"where",
"N",
"is",
"the",
"number",
"of",
"processes",
"on",
"this",
"machine",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/mpi_util.py#L49-L67 | valid |
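
The rank-assignment bookkeeping from get_local_rank_size, with the allgather result mocked as a hard-coded list of (global_rank, hostname) pairs so the snippet runs without MPI.

from collections import defaultdict

ranks_nodes = [(0, 'node-a'), (1, 'node-a'), (2, 'node-b'), (3, 'node-a'), (4, 'node-b')]

def local_rank_size(my_rank, ranks_nodes):
    node2rankssofar = defaultdict(int)
    local_rank, my_node = None, dict(ranks_nodes)[my_rank]
    for rank, node in ranks_nodes:
        if rank == my_rank:
            local_rank = node2rankssofar[node]   # how many ranks on this node came before me
        node2rankssofar[node] += 1
    return local_rank, node2rankssofar[my_node]

print(local_rank_size(3, ranks_nodes))   # (2, 3): third process on node-a, which hosts 3 ranks
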
openai/baselines | baselines/common/mpi_util.py | share_file | def share_file(comm, path):
"""
Copies the file from rank 0 to all other ranks
Puts it in the same place on all machines
"""
localrank, _ = get_local_rank_size(comm)
if comm.Get_rank() == 0:
with open(path, 'rb') as fh:
data = fh.read()
comm.bcast(data)
else:
data = comm.bcast(None)
if localrank == 0:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'wb') as fh:
fh.write(data)
comm.Barrier() | python | def share_file(comm, path):
"""
Copies the file from rank 0 to all other ranks
Puts it in the same place on all machines
"""
localrank, _ = get_local_rank_size(comm)
if comm.Get_rank() == 0:
with open(path, 'rb') as fh:
data = fh.read()
comm.bcast(data)
else:
data = comm.bcast(None)
if localrank == 0:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'wb') as fh:
fh.write(data)
comm.Barrier() | [
"def",
"share_file",
"(",
"comm",
",",
"path",
")",
":",
"localrank",
",",
"_",
"=",
"get_local_rank_size",
"(",
"comm",
")",
"if",
"comm",
".",
"Get_rank",
"(",
")",
"==",
"0",
":",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"fh",
":",
"data",
"=",
"fh",
".",
"read",
"(",
")",
"comm",
".",
"bcast",
"(",
"data",
")",
"else",
":",
"data",
"=",
"comm",
".",
"bcast",
"(",
"None",
")",
"if",
"localrank",
"==",
"0",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"data",
")",
"comm",
".",
"Barrier",
"(",
")"
] | Copies the file from rank 0 to all other ranks
Puts it in the same place on all machines | [
"Copies",
"the",
"file",
"from",
"rank",
"0",
"to",
"all",
"other",
"ranks",
"Puts",
"it",
"in",
"the",
"same",
"place",
"on",
"all",
"machines"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/mpi_util.py#L69-L85 | valid |
openai/baselines | baselines/common/mpi_util.py | dict_gather | def dict_gather(comm, d, op='mean', assert_all_have_data=True):
"""
Perform a reduction operation over dicts
"""
if comm is None: return d
alldicts = comm.allgather(d)
size = comm.size
k2li = defaultdict(list)
for d in alldicts:
for (k,v) in d.items():
k2li[k].append(v)
result = {}
for (k,li) in k2li.items():
if assert_all_have_data:
assert len(li)==size, "only %i out of %i MPI workers have sent '%s'" % (len(li), size, k)
if op=='mean':
result[k] = np.mean(li, axis=0)
elif op=='sum':
result[k] = np.sum(li, axis=0)
else:
assert 0, op
return result | python | def dict_gather(comm, d, op='mean', assert_all_have_data=True):
"""
Perform a reduction operation over dicts
"""
if comm is None: return d
alldicts = comm.allgather(d)
size = comm.size
k2li = defaultdict(list)
for d in alldicts:
for (k,v) in d.items():
k2li[k].append(v)
result = {}
for (k,li) in k2li.items():
if assert_all_have_data:
assert len(li)==size, "only %i out of %i MPI workers have sent '%s'" % (len(li), size, k)
if op=='mean':
result[k] = np.mean(li, axis=0)
elif op=='sum':
result[k] = np.sum(li, axis=0)
else:
assert 0, op
return result | [
"def",
"dict_gather",
"(",
"comm",
",",
"d",
",",
"op",
"=",
"'mean'",
",",
"assert_all_have_data",
"=",
"True",
")",
":",
"if",
"comm",
"is",
"None",
":",
"return",
"d",
"alldicts",
"=",
"comm",
".",
"allgather",
"(",
"d",
")",
"size",
"=",
"comm",
".",
"size",
"k2li",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"d",
"in",
"alldicts",
":",
"for",
"(",
"k",
",",
"v",
")",
"in",
"d",
".",
"items",
"(",
")",
":",
"k2li",
"[",
"k",
"]",
".",
"append",
"(",
"v",
")",
"result",
"=",
"{",
"}",
"for",
"(",
"k",
",",
"li",
")",
"in",
"k2li",
".",
"items",
"(",
")",
":",
"if",
"assert_all_have_data",
":",
"assert",
"len",
"(",
"li",
")",
"==",
"size",
",",
"\"only %i out of %i MPI workers have sent '%s'\"",
"%",
"(",
"len",
"(",
"li",
")",
",",
"size",
",",
"k",
")",
"if",
"op",
"==",
"'mean'",
":",
"result",
"[",
"k",
"]",
"=",
"np",
".",
"mean",
"(",
"li",
",",
"axis",
"=",
"0",
")",
"elif",
"op",
"==",
"'sum'",
":",
"result",
"[",
"k",
"]",
"=",
"np",
".",
"sum",
"(",
"li",
",",
"axis",
"=",
"0",
")",
"else",
":",
"assert",
"0",
",",
"op",
"return",
"result"
] | Perform a reduction operation over dicts | [
"Perform",
"a",
"reduction",
"operation",
"over",
"dicts"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/mpi_util.py#L87-L108 | valid |
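
The reduction logic of dict_gather with the allgather mocked as a plain list of per-worker dicts, showing what the 'mean' and 'sum' ops produce.

import numpy as np
from collections import defaultdict

alldicts = [{'loss': 1.0, 'acc': 0.5},     # worker 0
            {'loss': 3.0, 'acc': 0.7}]     # worker 1

def reduce_dicts(alldicts, op='mean'):
    k2li = defaultdict(list)
    for d in alldicts:
        for k, v in d.items():
            k2li[k].append(v)
    reducer = np.mean if op == 'mean' else np.sum
    return {k: reducer(li, axis=0) for k, li in k2li.items()}

print(reduce_dicts(alldicts, 'mean'))      # loss averages to 2.0, acc to ~0.6
print(reduce_dicts(alldicts, 'sum'))       # loss sums to 4.0, acc to ~1.2
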
openai/baselines | baselines/common/mpi_util.py | mpi_weighted_mean | def mpi_weighted_mean(comm, local_name2valcount):
"""
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn('WARNING: tried to compute mean on non-float {}={}'.format(name, val))
else:
name2sum[name] += val * count
name2count[name] += count
return {name : name2sum[name] / name2count[name] for name in name2sum}
else:
return {} | python | def mpi_weighted_mean(comm, local_name2valcount):
"""
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn('WARNING: tried to compute mean on non-float {}={}'.format(name, val))
else:
name2sum[name] += val * count
name2count[name] += count
return {name : name2sum[name] / name2count[name] for name in name2sum}
else:
return {} | [
"def",
"mpi_weighted_mean",
"(",
"comm",
",",
"local_name2valcount",
")",
":",
"all_name2valcount",
"=",
"comm",
".",
"gather",
"(",
"local_name2valcount",
")",
"if",
"comm",
".",
"rank",
"==",
"0",
":",
"name2sum",
"=",
"defaultdict",
"(",
"float",
")",
"name2count",
"=",
"defaultdict",
"(",
"float",
")",
"for",
"n2vc",
"in",
"all_name2valcount",
":",
"for",
"(",
"name",
",",
"(",
"val",
",",
"count",
")",
")",
"in",
"n2vc",
".",
"items",
"(",
")",
":",
"try",
":",
"val",
"=",
"float",
"(",
"val",
")",
"except",
"ValueError",
":",
"if",
"comm",
".",
"rank",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"'WARNING: tried to compute mean on non-float {}={}'",
".",
"format",
"(",
"name",
",",
"val",
")",
")",
"else",
":",
"name2sum",
"[",
"name",
"]",
"+=",
"val",
"*",
"count",
"name2count",
"[",
"name",
"]",
"+=",
"count",
"return",
"{",
"name",
":",
"name2sum",
"[",
"name",
"]",
"/",
"name2count",
"[",
"name",
"]",
"for",
"name",
"in",
"name2sum",
"}",
"else",
":",
"return",
"{",
"}"
] | Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean | [
"Perform",
"a",
"weighted",
"average",
"over",
"dicts",
"that",
"are",
"each",
"on",
"a",
"different",
"node",
"Input",
":",
"local_name2valcount",
":",
"dict",
"mapping",
"key",
"-",
">",
"(",
"value",
"count",
")",
"Returns",
":",
"key",
"-",
">",
"mean"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/mpi_util.py#L110-L132 | valid |
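
The weighted-mean arithmetic of mpi_weighted_mean with the gather mocked out: each dict in the list plays the role of one MPI worker's name -> (value, count) mapping.

from collections import defaultdict

all_name2valcount = [{'ep_rew_mean': (10.0, 4)},    # worker 0: mean 10.0 over 4 episodes
                     {'ep_rew_mean': (20.0, 1)}]    # worker 1: mean 20.0 over 1 episode

name2sum, name2count = defaultdict(float), defaultdict(float)
for n2vc in all_name2valcount:
    for name, (val, count) in n2vc.items():
        name2sum[name] += val * count
        name2count[name] += count

print({name: name2sum[name] / name2count[name] for name in name2sum})
# {'ep_rew_mean': 12.0}, i.e. (10*4 + 20*1) / 5
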
openai/baselines | baselines/trpo_mpi/trpo_mpi.py | learn | def learn(*,
network,
env,
total_timesteps,
timesteps_per_batch=1024, # what to train on
max_kl=0.001,
cg_iters=10,
gamma=0.99,
lam=1.0, # advantage estimation
seed=None,
ent_coef=0.0,
cg_damping=1e-2,
vf_stepsize=3e-4,
vf_iters =3,
max_episodes=0, max_iters=0, # time constraint
callback=None,
load_path=None,
**network_kwargs
):
'''
learn a policy function with TRPO algorithm
Parameters:
----------
network neural network to learn. Can be either string ('mlp', 'cnn', 'lstm', 'lnlstm' for basic types)
or function that takes input placeholder and returns tuple (output, None) for feedforward nets
or (output, (state_placeholder, state_output, mask_placeholder)) for recurrent nets
env environment (one of the gym environments or wrapped via baselines.common.vec_env.VecEnv-type class)
timesteps_per_batch timesteps per gradient estimation batch
max_kl max KL divergence between old policy and new policy ( KL(pi_old || pi) )
ent_coef coefficient of policy entropy term in the optimization objective
cg_iters number of iterations of conjugate gradient algorithm
cg_damping conjugate gradient damping
vf_stepsize learning rate for adam optimizer used to optimize value function loss
vf_iters number of value function optimization iterations per policy optimization step
total_timesteps max number of timesteps
max_episodes max number of episodes
max_iters maximum number of policy optimization iterations
callback function to be called with (locals(), globals()) each policy optimization step
load_path str, path to load the model from (default: None, i.e. no model is loaded)
**network_kwargs keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
Returns:
-------
learnt model
'''
if MPI is not None:
nworkers = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
else:
nworkers = 1
rank = 0
cpus_per_worker = 1
U.get_session(config=tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=cpus_per_worker,
intra_op_parallelism_threads=cpus_per_worker
))
policy = build_policy(env, network, value_network='copy', **network_kwargs)
set_global_seeds(seed)
np.set_printoptions(precision=3)
# Setup losses and stuff
# ----------------------------------------
ob_space = env.observation_space
ac_space = env.action_space
ob = observation_placeholder(ob_space)
with tf.variable_scope("pi"):
pi = policy(observ_placeholder=ob)
with tf.variable_scope("oldpi"):
oldpi = policy(observ_placeholder=ob)
atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
ac = pi.pdtype.sample_placeholder([None])
kloldnew = oldpi.pd.kl(pi.pd)
ent = pi.pd.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
entbonus = ent_coef * meanent
vferr = tf.reduce_mean(tf.square(pi.vf - ret))
ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # advantage * pnew / pold
surrgain = tf.reduce_mean(ratio * atarg)
optimgain = surrgain + entbonus
losses = [optimgain, meankl, entbonus, surrgain, meanent]
loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]
dist = meankl
all_var_list = get_trainable_variables("pi")
# var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("pol")]
# vf_var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("vf")]
var_list = get_pi_trainable_variables("pi")
vf_var_list = get_vf_trainable_variables("pi")
vfadam = MpiAdam(vf_var_list)
get_flat = U.GetFlat(var_list)
set_from_flat = U.SetFromFlat(var_list)
klgrads = tf.gradients(dist, var_list)
flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
shapes = [var.get_shape().as_list() for var in var_list]
start = 0
tangents = []
for shape in shapes:
sz = U.intprod(shape)
tangents.append(tf.reshape(flat_tangent[start:start+sz], shape))
start += sz
gvp = tf.add_n([tf.reduce_sum(g*tangent) for (g, tangent) in zipsame(klgrads, tangents)]) #pylint: disable=E1111
fvp = U.flatgrad(gvp, var_list)
assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv)
for (oldv, newv) in zipsame(get_variables("oldpi"), get_variables("pi"))])
compute_losses = U.function([ob, ac, atarg], losses)
compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)])
compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list))
@contextmanager
def timed(msg):
if rank == 0:
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize("done in %.3f seconds"%(time.time() - tstart), color='magenta'))
else:
yield
def allmean(x):
assert isinstance(x, np.ndarray)
if MPI is not None:
out = np.empty_like(x)
MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
out /= nworkers
else:
out = np.copy(x)
return out
U.initialize()
if load_path is not None:
pi.load(load_path)
th_init = get_flat()
if MPI is not None:
MPI.COMM_WORLD.Bcast(th_init, root=0)
set_from_flat(th_init)
vfadam.sync()
print("Init param sum", th_init.sum(), flush=True)
# Prepare for rollouts
# ----------------------------------------
seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths
rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
if sum([max_iters>0, total_timesteps>0, max_episodes>0])==0:
# nothing to be done
return pi
assert sum([max_iters>0, total_timesteps>0, max_episodes>0]) < 2, \
'out of max_iters, total_timesteps, and max_episodes only one should be specified'
while True:
if callback: callback(locals(), globals())
if total_timesteps and timesteps_so_far >= total_timesteps:
break
elif max_episodes and episodes_so_far >= max_episodes:
break
elif max_iters and iters_so_far >= max_iters:
break
logger.log("********** Iteration %i ************"%iters_so_far)
with timed("sampling"):
seg = seg_gen.__next__()
add_vtarg_and_adv(seg, gamma, lam)
# ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
vpredbefore = seg["vpred"] # predicted value function before update
atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
if hasattr(pi, "ret_rms"): pi.ret_rms.update(tdlamret)
if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy
args = seg["ob"], seg["ac"], atarg
fvpargs = [arr[::5] for arr in args]
def fisher_vector_product(p):
return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p
assign_old_eq_new() # set old parameter values to new parameter values
with timed("computegrad"):
*lossbefore, g = compute_lossandgrad(*args)
lossbefore = allmean(np.array(lossbefore))
g = allmean(g)
if np.allclose(g, 0):
logger.log("Got zero gradient. not updating")
else:
with timed("cg"):
stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=rank==0)
assert np.isfinite(stepdir).all()
shs = .5*stepdir.dot(fisher_vector_product(stepdir))
lm = np.sqrt(shs / max_kl)
# logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
fullstep = stepdir / lm
expectedimprove = g.dot(fullstep)
surrbefore = lossbefore[0]
stepsize = 1.0
thbefore = get_flat()
for _ in range(10):
thnew = thbefore + fullstep * stepsize
set_from_flat(thnew)
meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args)))
improve = surr - surrbefore
logger.log("Expected: %.3f Actual: %.3f"%(expectedimprove, improve))
if not np.isfinite(meanlosses).all():
logger.log("Got non-finite value of losses -- bad!")
elif kl > max_kl * 1.5:
logger.log("violated KL constraint. shrinking step.")
elif improve < 0:
logger.log("surrogate didn't improve. shrinking step.")
else:
logger.log("Stepsize OK!")
break
stepsize *= .5
else:
logger.log("couldn't compute a good step")
set_from_flat(thbefore)
if nworkers > 1 and iters_so_far % 20 == 0:
paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum())) # list of tuples
assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
for (lossname, lossval) in zip(loss_names, meanlosses):
logger.record_tabular(lossname, lossval)
with timed("vf"):
for _ in range(vf_iters):
for (mbob, mbret) in dataset.iterbatches((seg["ob"], seg["tdlamret"]),
include_final_partial_batch=False, batch_size=64):
g = allmean(compute_vflossandgrad(mbob, mbret))
vfadam.update(g, vf_stepsize)
logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
lrlocal = (seg["ep_lens"], seg["ep_rets"]) # local values
if MPI is not None:
listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples
else:
listoflrpairs = [lrlocal]
lens, rews = map(flatten_lists, zip(*listoflrpairs))
lenbuffer.extend(lens)
rewbuffer.extend(rews)
logger.record_tabular("EpLenMean", np.mean(lenbuffer))
logger.record_tabular("EpRewMean", np.mean(rewbuffer))
logger.record_tabular("EpThisIter", len(lens))
episodes_so_far += len(lens)
timesteps_so_far += sum(lens)
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - tstart)
if rank==0:
logger.dump_tabular()
return pi | python | def learn(*,
network,
env,
total_timesteps,
timesteps_per_batch=1024, # what to train on
max_kl=0.001,
cg_iters=10,
gamma=0.99,
lam=1.0, # advantage estimation
seed=None,
ent_coef=0.0,
cg_damping=1e-2,
vf_stepsize=3e-4,
vf_iters =3,
max_episodes=0, max_iters=0, # time constraint
callback=None,
load_path=None,
**network_kwargs
):
'''
learn a policy function with TRPO algorithm
Parameters:
----------
network neural network to learn. Can be either string ('mlp', 'cnn', 'lstm', 'lnlstm' for basic types)
or function that takes input placeholder and returns tuple (output, None) for feedforward nets
or (output, (state_placeholder, state_output, mask_placeholder)) for recurrent nets
env environment (one of the gym environments or wrapped via baselines.common.vec_env.VecEnv-type class)
timesteps_per_batch timesteps per gradient estimation batch
max_kl max KL divergence between old policy and new policy ( KL(pi_old || pi) )
ent_coef coefficient of policy entropy term in the optimization objective
cg_iters number of iterations of conjugate gradient algorithm
cg_damping conjugate gradient damping
vf_stepsize learning rate for adam optimizer used to optimize value function loss
vf_iters number of value function optimization iterations per policy optimization step
total_timesteps max number of timesteps
max_episodes max number of episodes
max_iters maximum number of policy optimization iterations
callback function to be called with (locals(), globals()) each policy optimization step
load_path str, path to load the model from (default: None, i.e. no model is loaded)
**network_kwargs keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
Returns:
-------
learnt model
'''
if MPI is not None:
nworkers = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
else:
nworkers = 1
rank = 0
cpus_per_worker = 1
U.get_session(config=tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=cpus_per_worker,
intra_op_parallelism_threads=cpus_per_worker
))
policy = build_policy(env, network, value_network='copy', **network_kwargs)
set_global_seeds(seed)
np.set_printoptions(precision=3)
# Setup losses and stuff
# ----------------------------------------
ob_space = env.observation_space
ac_space = env.action_space
ob = observation_placeholder(ob_space)
with tf.variable_scope("pi"):
pi = policy(observ_placeholder=ob)
with tf.variable_scope("oldpi"):
oldpi = policy(observ_placeholder=ob)
atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
ac = pi.pdtype.sample_placeholder([None])
kloldnew = oldpi.pd.kl(pi.pd)
ent = pi.pd.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
entbonus = ent_coef * meanent
vferr = tf.reduce_mean(tf.square(pi.vf - ret))
ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # advantage * pnew / pold
surrgain = tf.reduce_mean(ratio * atarg)
optimgain = surrgain + entbonus
losses = [optimgain, meankl, entbonus, surrgain, meanent]
loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]
dist = meankl
all_var_list = get_trainable_variables("pi")
# var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("pol")]
# vf_var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("vf")]
var_list = get_pi_trainable_variables("pi")
vf_var_list = get_vf_trainable_variables("pi")
vfadam = MpiAdam(vf_var_list)
get_flat = U.GetFlat(var_list)
set_from_flat = U.SetFromFlat(var_list)
klgrads = tf.gradients(dist, var_list)
flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
shapes = [var.get_shape().as_list() for var in var_list]
start = 0
tangents = []
for shape in shapes:
sz = U.intprod(shape)
tangents.append(tf.reshape(flat_tangent[start:start+sz], shape))
start += sz
gvp = tf.add_n([tf.reduce_sum(g*tangent) for (g, tangent) in zipsame(klgrads, tangents)]) #pylint: disable=E1111
fvp = U.flatgrad(gvp, var_list)
assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv)
for (oldv, newv) in zipsame(get_variables("oldpi"), get_variables("pi"))])
compute_losses = U.function([ob, ac, atarg], losses)
compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)])
compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list))
@contextmanager
def timed(msg):
if rank == 0:
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize("done in %.3f seconds"%(time.time() - tstart), color='magenta'))
else:
yield
def allmean(x):
assert isinstance(x, np.ndarray)
if MPI is not None:
out = np.empty_like(x)
MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
out /= nworkers
else:
out = np.copy(x)
return out
U.initialize()
if load_path is not None:
pi.load(load_path)
th_init = get_flat()
if MPI is not None:
MPI.COMM_WORLD.Bcast(th_init, root=0)
set_from_flat(th_init)
vfadam.sync()
print("Init param sum", th_init.sum(), flush=True)
# Prepare for rollouts
# ----------------------------------------
seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths
rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
if sum([max_iters>0, total_timesteps>0, max_episodes>0])==0:
# nothing to be done
return pi
assert sum([max_iters>0, total_timesteps>0, max_episodes>0]) < 2, \
'out of max_iters, total_timesteps, and max_episodes only one should be specified'
while True:
if callback: callback(locals(), globals())
if total_timesteps and timesteps_so_far >= total_timesteps:
break
elif max_episodes and episodes_so_far >= max_episodes:
break
elif max_iters and iters_so_far >= max_iters:
break
logger.log("********** Iteration %i ************"%iters_so_far)
with timed("sampling"):
seg = seg_gen.__next__()
add_vtarg_and_adv(seg, gamma, lam)
# ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
vpredbefore = seg["vpred"] # predicted value function before update
atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
if hasattr(pi, "ret_rms"): pi.ret_rms.update(tdlamret)
if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy
args = seg["ob"], seg["ac"], atarg
fvpargs = [arr[::5] for arr in args]
def fisher_vector_product(p):
return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p
assign_old_eq_new() # set old parameter values to new parameter values
with timed("computegrad"):
*lossbefore, g = compute_lossandgrad(*args)
lossbefore = allmean(np.array(lossbefore))
g = allmean(g)
if np.allclose(g, 0):
logger.log("Got zero gradient. not updating")
else:
with timed("cg"):
stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=rank==0)
assert np.isfinite(stepdir).all()
shs = .5*stepdir.dot(fisher_vector_product(stepdir))
lm = np.sqrt(shs / max_kl)
# logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
fullstep = stepdir / lm
expectedimprove = g.dot(fullstep)
surrbefore = lossbefore[0]
stepsize = 1.0
thbefore = get_flat()
for _ in range(10):
thnew = thbefore + fullstep * stepsize
set_from_flat(thnew)
meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args)))
improve = surr - surrbefore
logger.log("Expected: %.3f Actual: %.3f"%(expectedimprove, improve))
if not np.isfinite(meanlosses).all():
logger.log("Got non-finite value of losses -- bad!")
elif kl > max_kl * 1.5:
logger.log("violated KL constraint. shrinking step.")
elif improve < 0:
logger.log("surrogate didn't improve. shrinking step.")
else:
logger.log("Stepsize OK!")
break
stepsize *= .5
else:
logger.log("couldn't compute a good step")
set_from_flat(thbefore)
if nworkers > 1 and iters_so_far % 20 == 0:
paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum())) # list of tuples
assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
for (lossname, lossval) in zip(loss_names, meanlosses):
logger.record_tabular(lossname, lossval)
with timed("vf"):
for _ in range(vf_iters):
for (mbob, mbret) in dataset.iterbatches((seg["ob"], seg["tdlamret"]),
include_final_partial_batch=False, batch_size=64):
g = allmean(compute_vflossandgrad(mbob, mbret))
vfadam.update(g, vf_stepsize)
logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
lrlocal = (seg["ep_lens"], seg["ep_rets"]) # local values
if MPI is not None:
listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples
else:
listoflrpairs = [lrlocal]
lens, rews = map(flatten_lists, zip(*listoflrpairs))
lenbuffer.extend(lens)
rewbuffer.extend(rews)
logger.record_tabular("EpLenMean", np.mean(lenbuffer))
logger.record_tabular("EpRewMean", np.mean(rewbuffer))
logger.record_tabular("EpThisIter", len(lens))
episodes_so_far += len(lens)
timesteps_so_far += sum(lens)
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - tstart)
if rank==0:
logger.dump_tabular()
return pi | [
"def",
"learn",
"(",
"*",
",",
"network",
",",
"env",
",",
"total_timesteps",
",",
"timesteps_per_batch",
"=",
"1024",
",",
"# what to train on",
"max_kl",
"=",
"0.001",
",",
"cg_iters",
"=",
"10",
",",
"gamma",
"=",
"0.99",
",",
"lam",
"=",
"1.0",
",",
"# advantage estimation",
"seed",
"=",
"None",
",",
"ent_coef",
"=",
"0.0",
",",
"cg_damping",
"=",
"1e-2",
",",
"vf_stepsize",
"=",
"3e-4",
",",
"vf_iters",
"=",
"3",
",",
"max_episodes",
"=",
"0",
",",
"max_iters",
"=",
"0",
",",
"# time constraint",
"callback",
"=",
"None",
",",
"load_path",
"=",
"None",
",",
"*",
"*",
"network_kwargs",
")",
":",
"if",
"MPI",
"is",
"not",
"None",
":",
"nworkers",
"=",
"MPI",
".",
"COMM_WORLD",
".",
"Get_size",
"(",
")",
"rank",
"=",
"MPI",
".",
"COMM_WORLD",
".",
"Get_rank",
"(",
")",
"else",
":",
"nworkers",
"=",
"1",
"rank",
"=",
"0",
"cpus_per_worker",
"=",
"1",
"U",
".",
"get_session",
"(",
"config",
"=",
"tf",
".",
"ConfigProto",
"(",
"allow_soft_placement",
"=",
"True",
",",
"inter_op_parallelism_threads",
"=",
"cpus_per_worker",
",",
"intra_op_parallelism_threads",
"=",
"cpus_per_worker",
")",
")",
"policy",
"=",
"build_policy",
"(",
"env",
",",
"network",
",",
"value_network",
"=",
"'copy'",
",",
"*",
"*",
"network_kwargs",
")",
"set_global_seeds",
"(",
"seed",
")",
"np",
".",
"set_printoptions",
"(",
"precision",
"=",
"3",
")",
"# Setup losses and stuff",
"# ----------------------------------------",
"ob_space",
"=",
"env",
".",
"observation_space",
"ac_space",
"=",
"env",
".",
"action_space",
"ob",
"=",
"observation_placeholder",
"(",
"ob_space",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"pi\"",
")",
":",
"pi",
"=",
"policy",
"(",
"observ_placeholder",
"=",
"ob",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"oldpi\"",
")",
":",
"oldpi",
"=",
"policy",
"(",
"observ_placeholder",
"=",
"ob",
")",
"atarg",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"float32",
",",
"shape",
"=",
"[",
"None",
"]",
")",
"# Target advantage function (if applicable)",
"ret",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"float32",
",",
"shape",
"=",
"[",
"None",
"]",
")",
"# Empirical return",
"ac",
"=",
"pi",
".",
"pdtype",
".",
"sample_placeholder",
"(",
"[",
"None",
"]",
")",
"kloldnew",
"=",
"oldpi",
".",
"pd",
".",
"kl",
"(",
"pi",
".",
"pd",
")",
"ent",
"=",
"pi",
".",
"pd",
".",
"entropy",
"(",
")",
"meankl",
"=",
"tf",
".",
"reduce_mean",
"(",
"kloldnew",
")",
"meanent",
"=",
"tf",
".",
"reduce_mean",
"(",
"ent",
")",
"entbonus",
"=",
"ent_coef",
"*",
"meanent",
"vferr",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"square",
"(",
"pi",
".",
"vf",
"-",
"ret",
")",
")",
"ratio",
"=",
"tf",
".",
"exp",
"(",
"pi",
".",
"pd",
".",
"logp",
"(",
"ac",
")",
"-",
"oldpi",
".",
"pd",
".",
"logp",
"(",
"ac",
")",
")",
"# advantage * pnew / pold",
"surrgain",
"=",
"tf",
".",
"reduce_mean",
"(",
"ratio",
"*",
"atarg",
")",
"optimgain",
"=",
"surrgain",
"+",
"entbonus",
"losses",
"=",
"[",
"optimgain",
",",
"meankl",
",",
"entbonus",
",",
"surrgain",
",",
"meanent",
"]",
"loss_names",
"=",
"[",
"\"optimgain\"",
",",
"\"meankl\"",
",",
"\"entloss\"",
",",
"\"surrgain\"",
",",
"\"entropy\"",
"]",
"dist",
"=",
"meankl",
"all_var_list",
"=",
"get_trainable_variables",
"(",
"\"pi\"",
")",
"# var_list = [v for v in all_var_list if v.name.split(\"/\")[1].startswith(\"pol\")]",
"# vf_var_list = [v for v in all_var_list if v.name.split(\"/\")[1].startswith(\"vf\")]",
"var_list",
"=",
"get_pi_trainable_variables",
"(",
"\"pi\"",
")",
"vf_var_list",
"=",
"get_vf_trainable_variables",
"(",
"\"pi\"",
")",
"vfadam",
"=",
"MpiAdam",
"(",
"vf_var_list",
")",
"get_flat",
"=",
"U",
".",
"GetFlat",
"(",
"var_list",
")",
"set_from_flat",
"=",
"U",
".",
"SetFromFlat",
"(",
"var_list",
")",
"klgrads",
"=",
"tf",
".",
"gradients",
"(",
"dist",
",",
"var_list",
")",
"flat_tangent",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"float32",
",",
"shape",
"=",
"[",
"None",
"]",
",",
"name",
"=",
"\"flat_tan\"",
")",
"shapes",
"=",
"[",
"var",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"for",
"var",
"in",
"var_list",
"]",
"start",
"=",
"0",
"tangents",
"=",
"[",
"]",
"for",
"shape",
"in",
"shapes",
":",
"sz",
"=",
"U",
".",
"intprod",
"(",
"shape",
")",
"tangents",
".",
"append",
"(",
"tf",
".",
"reshape",
"(",
"flat_tangent",
"[",
"start",
":",
"start",
"+",
"sz",
"]",
",",
"shape",
")",
")",
"start",
"+=",
"sz",
"gvp",
"=",
"tf",
".",
"add_n",
"(",
"[",
"tf",
".",
"reduce_sum",
"(",
"g",
"*",
"tangent",
")",
"for",
"(",
"g",
",",
"tangent",
")",
"in",
"zipsame",
"(",
"klgrads",
",",
"tangents",
")",
"]",
")",
"#pylint: disable=E1111",
"fvp",
"=",
"U",
".",
"flatgrad",
"(",
"gvp",
",",
"var_list",
")",
"assign_old_eq_new",
"=",
"U",
".",
"function",
"(",
"[",
"]",
",",
"[",
"]",
",",
"updates",
"=",
"[",
"tf",
".",
"assign",
"(",
"oldv",
",",
"newv",
")",
"for",
"(",
"oldv",
",",
"newv",
")",
"in",
"zipsame",
"(",
"get_variables",
"(",
"\"oldpi\"",
")",
",",
"get_variables",
"(",
"\"pi\"",
")",
")",
"]",
")",
"compute_losses",
"=",
"U",
".",
"function",
"(",
"[",
"ob",
",",
"ac",
",",
"atarg",
"]",
",",
"losses",
")",
"compute_lossandgrad",
"=",
"U",
".",
"function",
"(",
"[",
"ob",
",",
"ac",
",",
"atarg",
"]",
",",
"losses",
"+",
"[",
"U",
".",
"flatgrad",
"(",
"optimgain",
",",
"var_list",
")",
"]",
")",
"compute_fvp",
"=",
"U",
".",
"function",
"(",
"[",
"flat_tangent",
",",
"ob",
",",
"ac",
",",
"atarg",
"]",
",",
"fvp",
")",
"compute_vflossandgrad",
"=",
"U",
".",
"function",
"(",
"[",
"ob",
",",
"ret",
"]",
",",
"U",
".",
"flatgrad",
"(",
"vferr",
",",
"vf_var_list",
")",
")",
"@",
"contextmanager",
"def",
"timed",
"(",
"msg",
")",
":",
"if",
"rank",
"==",
"0",
":",
"print",
"(",
"colorize",
"(",
"msg",
",",
"color",
"=",
"'magenta'",
")",
")",
"tstart",
"=",
"time",
".",
"time",
"(",
")",
"yield",
"print",
"(",
"colorize",
"(",
"\"done in %.3f seconds\"",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"tstart",
")",
",",
"color",
"=",
"'magenta'",
")",
")",
"else",
":",
"yield",
"def",
"allmean",
"(",
"x",
")",
":",
"assert",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
"if",
"MPI",
"is",
"not",
"None",
":",
"out",
"=",
"np",
".",
"empty_like",
"(",
"x",
")",
"MPI",
".",
"COMM_WORLD",
".",
"Allreduce",
"(",
"x",
",",
"out",
",",
"op",
"=",
"MPI",
".",
"SUM",
")",
"out",
"/=",
"nworkers",
"else",
":",
"out",
"=",
"np",
".",
"copy",
"(",
"x",
")",
"return",
"out",
"U",
".",
"initialize",
"(",
")",
"if",
"load_path",
"is",
"not",
"None",
":",
"pi",
".",
"load",
"(",
"load_path",
")",
"th_init",
"=",
"get_flat",
"(",
")",
"if",
"MPI",
"is",
"not",
"None",
":",
"MPI",
".",
"COMM_WORLD",
".",
"Bcast",
"(",
"th_init",
",",
"root",
"=",
"0",
")",
"set_from_flat",
"(",
"th_init",
")",
"vfadam",
".",
"sync",
"(",
")",
"print",
"(",
"\"Init param sum\"",
",",
"th_init",
".",
"sum",
"(",
")",
",",
"flush",
"=",
"True",
")",
"# Prepare for rollouts",
"# ----------------------------------------",
"seg_gen",
"=",
"traj_segment_generator",
"(",
"pi",
",",
"env",
",",
"timesteps_per_batch",
",",
"stochastic",
"=",
"True",
")",
"episodes_so_far",
"=",
"0",
"timesteps_so_far",
"=",
"0",
"iters_so_far",
"=",
"0",
"tstart",
"=",
"time",
".",
"time",
"(",
")",
"lenbuffer",
"=",
"deque",
"(",
"maxlen",
"=",
"40",
")",
"# rolling buffer for episode lengths",
"rewbuffer",
"=",
"deque",
"(",
"maxlen",
"=",
"40",
")",
"# rolling buffer for episode rewards",
"if",
"sum",
"(",
"[",
"max_iters",
">",
"0",
",",
"total_timesteps",
">",
"0",
",",
"max_episodes",
">",
"0",
"]",
")",
"==",
"0",
":",
"# noththing to be done",
"return",
"pi",
"assert",
"sum",
"(",
"[",
"max_iters",
">",
"0",
",",
"total_timesteps",
">",
"0",
",",
"max_episodes",
">",
"0",
"]",
")",
"<",
"2",
",",
"'out of max_iters, total_timesteps, and max_episodes only one should be specified'",
"while",
"True",
":",
"if",
"callback",
":",
"callback",
"(",
"locals",
"(",
")",
",",
"globals",
"(",
")",
")",
"if",
"total_timesteps",
"and",
"timesteps_so_far",
">=",
"total_timesteps",
":",
"break",
"elif",
"max_episodes",
"and",
"episodes_so_far",
">=",
"max_episodes",
":",
"break",
"elif",
"max_iters",
"and",
"iters_so_far",
">=",
"max_iters",
":",
"break",
"logger",
".",
"log",
"(",
"\"********** Iteration %i ************\"",
"%",
"iters_so_far",
")",
"with",
"timed",
"(",
"\"sampling\"",
")",
":",
"seg",
"=",
"seg_gen",
".",
"__next__",
"(",
")",
"add_vtarg_and_adv",
"(",
"seg",
",",
"gamma",
",",
"lam",
")",
"# ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))",
"ob",
",",
"ac",
",",
"atarg",
",",
"tdlamret",
"=",
"seg",
"[",
"\"ob\"",
"]",
",",
"seg",
"[",
"\"ac\"",
"]",
",",
"seg",
"[",
"\"adv\"",
"]",
",",
"seg",
"[",
"\"tdlamret\"",
"]",
"vpredbefore",
"=",
"seg",
"[",
"\"vpred\"",
"]",
"# predicted value function before udpate",
"atarg",
"=",
"(",
"atarg",
"-",
"atarg",
".",
"mean",
"(",
")",
")",
"/",
"atarg",
".",
"std",
"(",
")",
"# standardized advantage function estimate",
"if",
"hasattr",
"(",
"pi",
",",
"\"ret_rms\"",
")",
":",
"pi",
".",
"ret_rms",
".",
"update",
"(",
"tdlamret",
")",
"if",
"hasattr",
"(",
"pi",
",",
"\"ob_rms\"",
")",
":",
"pi",
".",
"ob_rms",
".",
"update",
"(",
"ob",
")",
"# update running mean/std for policy",
"args",
"=",
"seg",
"[",
"\"ob\"",
"]",
",",
"seg",
"[",
"\"ac\"",
"]",
",",
"atarg",
"fvpargs",
"=",
"[",
"arr",
"[",
":",
":",
"5",
"]",
"for",
"arr",
"in",
"args",
"]",
"def",
"fisher_vector_product",
"(",
"p",
")",
":",
"return",
"allmean",
"(",
"compute_fvp",
"(",
"p",
",",
"*",
"fvpargs",
")",
")",
"+",
"cg_damping",
"*",
"p",
"assign_old_eq_new",
"(",
")",
"# set old parameter values to new parameter values",
"with",
"timed",
"(",
"\"computegrad\"",
")",
":",
"*",
"lossbefore",
",",
"g",
"=",
"compute_lossandgrad",
"(",
"*",
"args",
")",
"lossbefore",
"=",
"allmean",
"(",
"np",
".",
"array",
"(",
"lossbefore",
")",
")",
"g",
"=",
"allmean",
"(",
"g",
")",
"if",
"np",
".",
"allclose",
"(",
"g",
",",
"0",
")",
":",
"logger",
".",
"log",
"(",
"\"Got zero gradient. not updating\"",
")",
"else",
":",
"with",
"timed",
"(",
"\"cg\"",
")",
":",
"stepdir",
"=",
"cg",
"(",
"fisher_vector_product",
",",
"g",
",",
"cg_iters",
"=",
"cg_iters",
",",
"verbose",
"=",
"rank",
"==",
"0",
")",
"assert",
"np",
".",
"isfinite",
"(",
"stepdir",
")",
".",
"all",
"(",
")",
"shs",
"=",
".5",
"*",
"stepdir",
".",
"dot",
"(",
"fisher_vector_product",
"(",
"stepdir",
")",
")",
"lm",
"=",
"np",
".",
"sqrt",
"(",
"shs",
"/",
"max_kl",
")",
"# logger.log(\"lagrange multiplier:\", lm, \"gnorm:\", np.linalg.norm(g))",
"fullstep",
"=",
"stepdir",
"/",
"lm",
"expectedimprove",
"=",
"g",
".",
"dot",
"(",
"fullstep",
")",
"surrbefore",
"=",
"lossbefore",
"[",
"0",
"]",
"stepsize",
"=",
"1.0",
"thbefore",
"=",
"get_flat",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"10",
")",
":",
"thnew",
"=",
"thbefore",
"+",
"fullstep",
"*",
"stepsize",
"set_from_flat",
"(",
"thnew",
")",
"meanlosses",
"=",
"surr",
",",
"kl",
",",
"",
"*",
"_",
"=",
"allmean",
"(",
"np",
".",
"array",
"(",
"compute_losses",
"(",
"*",
"args",
")",
")",
")",
"improve",
"=",
"surr",
"-",
"surrbefore",
"logger",
".",
"log",
"(",
"\"Expected: %.3f Actual: %.3f\"",
"%",
"(",
"expectedimprove",
",",
"improve",
")",
")",
"if",
"not",
"np",
".",
"isfinite",
"(",
"meanlosses",
")",
".",
"all",
"(",
")",
":",
"logger",
".",
"log",
"(",
"\"Got non-finite value of losses -- bad!\"",
")",
"elif",
"kl",
">",
"max_kl",
"*",
"1.5",
":",
"logger",
".",
"log",
"(",
"\"violated KL constraint. shrinking step.\"",
")",
"elif",
"improve",
"<",
"0",
":",
"logger",
".",
"log",
"(",
"\"surrogate didn't improve. shrinking step.\"",
")",
"else",
":",
"logger",
".",
"log",
"(",
"\"Stepsize OK!\"",
")",
"break",
"stepsize",
"*=",
".5",
"else",
":",
"logger",
".",
"log",
"(",
"\"couldn't compute a good step\"",
")",
"set_from_flat",
"(",
"thbefore",
")",
"if",
"nworkers",
">",
"1",
"and",
"iters_so_far",
"%",
"20",
"==",
"0",
":",
"paramsums",
"=",
"MPI",
".",
"COMM_WORLD",
".",
"allgather",
"(",
"(",
"thnew",
".",
"sum",
"(",
")",
",",
"vfadam",
".",
"getflat",
"(",
")",
".",
"sum",
"(",
")",
")",
")",
"# list of tuples",
"assert",
"all",
"(",
"np",
".",
"allclose",
"(",
"ps",
",",
"paramsums",
"[",
"0",
"]",
")",
"for",
"ps",
"in",
"paramsums",
"[",
"1",
":",
"]",
")",
"for",
"(",
"lossname",
",",
"lossval",
")",
"in",
"zip",
"(",
"loss_names",
",",
"meanlosses",
")",
":",
"logger",
".",
"record_tabular",
"(",
"lossname",
",",
"lossval",
")",
"with",
"timed",
"(",
"\"vf\"",
")",
":",
"for",
"_",
"in",
"range",
"(",
"vf_iters",
")",
":",
"for",
"(",
"mbob",
",",
"mbret",
")",
"in",
"dataset",
".",
"iterbatches",
"(",
"(",
"seg",
"[",
"\"ob\"",
"]",
",",
"seg",
"[",
"\"tdlamret\"",
"]",
")",
",",
"include_final_partial_batch",
"=",
"False",
",",
"batch_size",
"=",
"64",
")",
":",
"g",
"=",
"allmean",
"(",
"compute_vflossandgrad",
"(",
"mbob",
",",
"mbret",
")",
")",
"vfadam",
".",
"update",
"(",
"g",
",",
"vf_stepsize",
")",
"logger",
".",
"record_tabular",
"(",
"\"ev_tdlam_before\"",
",",
"explained_variance",
"(",
"vpredbefore",
",",
"tdlamret",
")",
")",
"lrlocal",
"=",
"(",
"seg",
"[",
"\"ep_lens\"",
"]",
",",
"seg",
"[",
"\"ep_rets\"",
"]",
")",
"# local values",
"if",
"MPI",
"is",
"not",
"None",
":",
"listoflrpairs",
"=",
"MPI",
".",
"COMM_WORLD",
".",
"allgather",
"(",
"lrlocal",
")",
"# list of tuples",
"else",
":",
"listoflrpairs",
"=",
"[",
"lrlocal",
"]",
"lens",
",",
"rews",
"=",
"map",
"(",
"flatten_lists",
",",
"zip",
"(",
"*",
"listoflrpairs",
")",
")",
"lenbuffer",
".",
"extend",
"(",
"lens",
")",
"rewbuffer",
".",
"extend",
"(",
"rews",
")",
"logger",
".",
"record_tabular",
"(",
"\"EpLenMean\"",
",",
"np",
".",
"mean",
"(",
"lenbuffer",
")",
")",
"logger",
".",
"record_tabular",
"(",
"\"EpRewMean\"",
",",
"np",
".",
"mean",
"(",
"rewbuffer",
")",
")",
"logger",
".",
"record_tabular",
"(",
"\"EpThisIter\"",
",",
"len",
"(",
"lens",
")",
")",
"episodes_so_far",
"+=",
"len",
"(",
"lens",
")",
"timesteps_so_far",
"+=",
"sum",
"(",
"lens",
")",
"iters_so_far",
"+=",
"1",
"logger",
".",
"record_tabular",
"(",
"\"EpisodesSoFar\"",
",",
"episodes_so_far",
")",
"logger",
".",
"record_tabular",
"(",
"\"TimestepsSoFar\"",
",",
"timesteps_so_far",
")",
"logger",
".",
"record_tabular",
"(",
"\"TimeElapsed\"",
",",
"time",
".",
"time",
"(",
")",
"-",
"tstart",
")",
"if",
"rank",
"==",
"0",
":",
"logger",
".",
"dump_tabular",
"(",
")",
"return",
"pi"
] | learn a policy function with TRPO algorithm
Parameters:
----------
network neural network to learn. Can be either string ('mlp', 'cnn', 'lstm', 'lnlstm' for basic types)
or function that takes input placeholder and returns tuple (output, None) for feedforward nets
or (output, (state_placeholder, state_output, mask_placeholder)) for recurrent nets
env environment (one of the gym environments or wrapped via baselines.common.vec_env.VecEnv-type class)
timesteps_per_batch timesteps per gradient estimation batch
max_kl max KL divergence between old policy and new policy ( KL(pi_old || pi) )
ent_coef coefficient of policy entropy term in the optimization objective
cg_iters number of iterations of conjugate gradient algorithm
cg_damping conjugate gradient damping
vf_stepsize learning rate for adam optimizer used to optimize value function loss
vf_iters number of value function optimization iterations per policy optimization step
total_timesteps max number of timesteps
max_episodes max number of episodes
max_iters maximum number of policy optimization iterations
callback function to be called with (locals(), globals()) each policy optimization step
load_path str, path to load the model from (default: None, i.e. no model is loaded)
**network_kwargs keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
Returns:
-------
learnt model | [
"learn",
"a",
"policy",
"function",
"with",
"TRPO",
"algorithm"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/trpo_mpi/trpo_mpi.py#L89-L392 | valid |
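
A hedged usage sketch for the learn entry point above on a toy control task. It assumes gym (and optionally mpi4py) is installed; 'mlp' is one of the registered baselines network names, and DummyVecEnv wraps a single gym environment in the VecEnv interface the docstring mentions. The same run can also be launched through the baselines.run command-line entry point with --alg=trpo_mpi.

import gym
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.trpo_mpi import trpo_mpi

env = DummyVecEnv([lambda: gym.make('CartPole-v0')])    # single-env VecEnv wrapper

pi = trpo_mpi.learn(
    network='mlp',              # registered baselines network
    env=env,
    total_timesteps=20000,
    timesteps_per_batch=1024,
    max_kl=0.001,
    gamma=0.99,
    lam=0.98,
)
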
openai/baselines | baselines/common/math_util.py | discount | def discount(x, gamma):
"""
computes discounted sums along 0th dimension of x.
inputs
------
x: ndarray
gamma: float
outputs
-------
y: ndarray with same shape as x, satisfying
y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],
where k = len(x) - t - 1
"""
assert x.ndim >= 1
return scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1] | python | def discount(x, gamma):
"""
computes discounted sums along 0th dimension of x.
inputs
------
x: ndarray
gamma: float
outputs
-------
y: ndarray with same shape as x, satisfying
y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],
where k = len(x) - t - 1
"""
assert x.ndim >= 1
return scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1] | [
"def",
"discount",
"(",
"x",
",",
"gamma",
")",
":",
"assert",
"x",
".",
"ndim",
">=",
"1",
"return",
"scipy",
".",
"signal",
".",
"lfilter",
"(",
"[",
"1",
"]",
",",
"[",
"1",
",",
"-",
"gamma",
"]",
",",
"x",
"[",
":",
":",
"-",
"1",
"]",
",",
"axis",
"=",
"0",
")",
"[",
":",
":",
"-",
"1",
"]"
] | computes discounted sums along 0th dimension of x.
inputs
------
x: ndarray
gamma: float
outputs
-------
y: ndarray with same shape as x, satisfying
y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],
where k = len(x) - t - 1 | [
"computes",
"discounted",
"sums",
"along",
"0th",
"dimension",
"of",
"x",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/math_util.py#L5-L23 | valid |
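
A quick numerical check that the lfilter trick in discount matches the recurrence stated in its docstring, with the function re-implemented locally so the snippet runs on its own.

import numpy as np
import scipy.signal

def discount(x, gamma):
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]

x = np.array([1.0, 2.0, 3.0, 4.0])
gamma = 0.9

# naive evaluation of y[t] = sum_k gamma^k * x[t+k]
naive = np.array([sum(gamma ** k * x[t + k] for k in range(len(x) - t)) for t in range(len(x))])
assert np.allclose(discount(x, gamma), naive)
print(discount(x, gamma))    # [8.146, 7.94, 6.6, 4.0]
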
openai/baselines | baselines/common/math_util.py | explained_variance | def explained_variance(ypred,y):
"""
Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero
"""
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
return np.nan if vary==0 else 1 - np.var(y-ypred)/vary | python | def explained_variance(ypred,y):
"""
Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero
"""
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
return np.nan if vary==0 else 1 - np.var(y-ypred)/vary | [
"def",
"explained_variance",
"(",
"ypred",
",",
"y",
")",
":",
"assert",
"y",
".",
"ndim",
"==",
"1",
"and",
"ypred",
".",
"ndim",
"==",
"1",
"vary",
"=",
"np",
".",
"var",
"(",
"y",
")",
"return",
"np",
".",
"nan",
"if",
"vary",
"==",
"0",
"else",
"1",
"-",
"np",
".",
"var",
"(",
"y",
"-",
"ypred",
")",
"/",
"vary"
] | Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero | [
"Computes",
"fraction",
"of",
"variance",
"that",
"ypred",
"explains",
"about",
"y",
".",
"Returns",
"1",
"-",
"Var",
"[",
"y",
"-",
"ypred",
"]",
"/",
"Var",
"[",
"y",
"]"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/math_util.py#L25-L38 | valid |
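
A small check of the three interpretation cases listed in the explained_variance docstring, using a local copy of the formula.

import numpy as np

def explained_variance(ypred, y):
    vary = np.var(y)
    return np.nan if vary == 0 else 1 - np.var(y - ypred) / vary

y = np.array([1.0, 2.0, 3.0, 4.0])
print(explained_variance(y.copy(), y))           #  1.0: perfect prediction
print(explained_variance(np.zeros_like(y), y))   #  0.0: no better than predicting zero
print(explained_variance(-y, y))                 # -3.0: worse than predicting zero
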
openai/baselines | baselines/common/math_util.py | discount_with_boundaries | def discount_with_boundaries(X, New, gamma):
"""
X: 2d array of floats, time x features
New: 2d array of bools, indicating when a new episode has started
"""
Y = np.zeros_like(X)
T = X.shape[0]
Y[T-1] = X[T-1]
for t in range(T-2, -1, -1):
Y[t] = X[t] + gamma * Y[t+1] * (1 - New[t+1])
return Y | python | def discount_with_boundaries(X, New, gamma):
"""
X: 2d array of floats, time x features
New: 2d array of bools, indicating when a new episode has started
"""
Y = np.zeros_like(X)
T = X.shape[0]
Y[T-1] = X[T-1]
for t in range(T-2, -1, -1):
Y[t] = X[t] + gamma * Y[t+1] * (1 - New[t+1])
return Y | [
"def",
"discount_with_boundaries",
"(",
"X",
",",
"New",
",",
"gamma",
")",
":",
"Y",
"=",
"np",
".",
"zeros_like",
"(",
"X",
")",
"T",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"Y",
"[",
"T",
"-",
"1",
"]",
"=",
"X",
"[",
"T",
"-",
"1",
"]",
"for",
"t",
"in",
"range",
"(",
"T",
"-",
"2",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"Y",
"[",
"t",
"]",
"=",
"X",
"[",
"t",
"]",
"+",
"gamma",
"*",
"Y",
"[",
"t",
"+",
"1",
"]",
"*",
"(",
"1",
"-",
"New",
"[",
"t",
"+",
"1",
"]",
")",
"return",
"Y"
] | X: 2d array of floats, time x features
New: 2d array of bools, indicating when a new episode has started | [
"X",
":",
"2d",
"array",
"of",
"floats",
"time",
"x",
"features",
"New",
":",
"2d",
"array",
"of",
"bools",
"indicating",
"when",
"a",
"new",
"episode",
"has",
"started"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/math_util.py#L63-L73 | valid |
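A usage sketch assuming the function is imported from the module path given in the record (baselines.common.math_util); the inputs are toy values shaped time x features as the docstring describes:

import numpy as np
from baselines.common.math_util import discount_with_boundaries

X = np.ones((4, 1), dtype=np.float32)                  # constant reward of 1 per step
New = np.array([[1.], [0.], [1.], [0.]], np.float32)   # new episodes start at t=0 and t=2
Y = discount_with_boundaries(X, New, gamma=0.9)
# the backward recursion is cut wherever New[t+1] == 1, so returns do not leak across episodes
assert np.allclose(Y, [[1.9], [1.0], [1.9], [1.0]])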
openai/baselines | baselines/deepq/replay_buffer.py | ReplayBuffer.sample | def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
return self._encode_sample(idxes) | python | def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
return self._encode_sample(idxes) | [
"def",
"sample",
"(",
"self",
",",
"batch_size",
")",
":",
"idxes",
"=",
"[",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"self",
".",
"_storage",
")",
"-",
"1",
")",
"for",
"_",
"in",
"range",
"(",
"batch_size",
")",
"]",
"return",
"self",
".",
"_encode_sample",
"(",
"idxes",
")"
] | Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise. | [
"Sample",
"a",
"batch",
"of",
"experiences",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/replay_buffer.py#L45-L68 | valid |
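A minimal fill-and-sample sketch for the buffer this method belongs to (toy transitions; the observation encoding is arbitrary):

import numpy as np
from baselines.deepq.replay_buffer import ReplayBuffer

buffer = ReplayBuffer(size=1000)
for t in range(100):
    obs_t = np.array([t], dtype=np.float32)
    obs_tp1 = np.array([t + 1], dtype=np.float32)
    buffer.add(obs_t, 0, 1.0, obs_tp1, float(t % 10 == 9))   # (obs, action, reward, next obs, done)

obs_b, act_b, rew_b, next_obs_b, done_b = buffer.sample(batch_size=32)
print(obs_b.shape, act_b.shape, done_b.shape)   # (32, 1) (32,) (32,)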
openai/baselines | baselines/deepq/replay_buffer.py | PrioritizedReplayBuffer.add | def add(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super().add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha | python | def add(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super().add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha | [
"def",
"add",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"idx",
"=",
"self",
".",
"_next_idx",
"super",
"(",
")",
".",
"add",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_it_sum",
"[",
"idx",
"]",
"=",
"self",
".",
"_max_priority",
"**",
"self",
".",
"_alpha",
"self",
".",
"_it_min",
"[",
"idx",
"]",
"=",
"self",
".",
"_max_priority",
"**",
"self",
".",
"_alpha"
] | See ReplayBuffer.store_effect | [
"See",
"ReplayBuffer",
".",
"store_effect"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/replay_buffer.py#L100-L105 | valid |
openai/baselines | baselines/deepq/replay_buffer.py | PrioritizedReplayBuffer.sample | def sample(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
indexes in buffer of sampled experiences
"""
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes]) | python | def sample(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
indexes in buffer of sampled experiences
"""
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes]) | [
"def",
"sample",
"(",
"self",
",",
"batch_size",
",",
"beta",
")",
":",
"assert",
"beta",
">",
"0",
"idxes",
"=",
"self",
".",
"_sample_proportional",
"(",
"batch_size",
")",
"weights",
"=",
"[",
"]",
"p_min",
"=",
"self",
".",
"_it_min",
".",
"min",
"(",
")",
"/",
"self",
".",
"_it_sum",
".",
"sum",
"(",
")",
"max_weight",
"=",
"(",
"p_min",
"*",
"len",
"(",
"self",
".",
"_storage",
")",
")",
"**",
"(",
"-",
"beta",
")",
"for",
"idx",
"in",
"idxes",
":",
"p_sample",
"=",
"self",
".",
"_it_sum",
"[",
"idx",
"]",
"/",
"self",
".",
"_it_sum",
".",
"sum",
"(",
")",
"weight",
"=",
"(",
"p_sample",
"*",
"len",
"(",
"self",
".",
"_storage",
")",
")",
"**",
"(",
"-",
"beta",
")",
"weights",
".",
"append",
"(",
"weight",
"/",
"max_weight",
")",
"weights",
"=",
"np",
".",
"array",
"(",
"weights",
")",
"encoded_sample",
"=",
"self",
".",
"_encode_sample",
"(",
"idxes",
")",
"return",
"tuple",
"(",
"list",
"(",
"encoded_sample",
")",
"+",
"[",
"weights",
",",
"idxes",
"]",
")"
] | Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
indexes in buffer of sampled experiences | [
"Sample",
"a",
"batch",
"of",
"experiences",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/replay_buffer.py#L117-L167 | valid |
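A short sketch of prioritized sampling (the alpha and beta values are illustrative). Because every freshly added transition gets the same default priority, the importance weights all come out as 1.0 here:

import numpy as np
from baselines.deepq.replay_buffer import PrioritizedReplayBuffer

pbuffer = PrioritizedReplayBuffer(size=1000, alpha=0.6)
for t in range(100):
    pbuffer.add(np.array([t], np.float32), 0, 0.0, np.array([t + 1], np.float32), 0.0)

obs_b, act_b, rew_b, next_obs_b, done_b, weights, idxes = pbuffer.sample(32, beta=0.4)
print(weights.min(), weights.max())   # 1.0 1.0 until priorities are updated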
openai/baselines | baselines/deepq/replay_buffer.py | PrioritizedReplayBuffer.update_priorities | def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority) | python | def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority) | [
"def",
"update_priorities",
"(",
"self",
",",
"idxes",
",",
"priorities",
")",
":",
"assert",
"len",
"(",
"idxes",
")",
"==",
"len",
"(",
"priorities",
")",
"for",
"idx",
",",
"priority",
"in",
"zip",
"(",
"idxes",
",",
"priorities",
")",
":",
"assert",
"priority",
">",
"0",
"assert",
"0",
"<=",
"idx",
"<",
"len",
"(",
"self",
".",
"_storage",
")",
"self",
".",
"_it_sum",
"[",
"idx",
"]",
"=",
"priority",
"**",
"self",
".",
"_alpha",
"self",
".",
"_it_min",
"[",
"idx",
"]",
"=",
"priority",
"**",
"self",
".",
"_alpha",
"self",
".",
"_max_priority",
"=",
"max",
"(",
"self",
".",
"_max_priority",
",",
"priority",
")"
] | Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`. | [
"Update",
"priorities",
"of",
"sampled",
"transitions",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/replay_buffer.py#L169-L191 | valid |
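A sketch of the usual refresh step after a training update: priorities become |TD error| plus a small epsilon so no transition ends up with zero sampling probability (the TD errors below are made up; in deepq they come from the train function):

import numpy as np
from baselines.deepq.replay_buffer import PrioritizedReplayBuffer

pbuffer = PrioritizedReplayBuffer(size=100, alpha=0.6)
for t in range(10):
    pbuffer.add(np.array([t], np.float32), 0, 0.0, np.array([t + 1], np.float32), 0.0)

*_, weights, idxes = pbuffer.sample(4, beta=0.4)
td_errors = np.array([0.5, -1.2, 0.05, 2.0])              # placeholder TD errors for the 4 samples
pbuffer.update_priorities(idxes, np.abs(td_errors) + 1e-6)
# transitions with large |TD error| are now proportionally more likely to be sampled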
openai/baselines | baselines/common/retro_wrappers.py | wrap_deepmind_retro | def wrap_deepmind_retro(env, scale=True, frame_stack=4):
"""
Configure environment for retro games, using config similar to DeepMind-style Atari in wrap_deepmind
"""
env = WarpFrame(env)
env = ClipRewardEnv(env)
if frame_stack > 1:
env = FrameStack(env, frame_stack)
if scale:
env = ScaledFloatFrame(env)
return env | python | def wrap_deepmind_retro(env, scale=True, frame_stack=4):
"""
Configure environment for retro games, using config similar to DeepMind-style Atari in wrap_deepmind
"""
env = WarpFrame(env)
env = ClipRewardEnv(env)
if frame_stack > 1:
env = FrameStack(env, frame_stack)
if scale:
env = ScaledFloatFrame(env)
return env | [
"def",
"wrap_deepmind_retro",
"(",
"env",
",",
"scale",
"=",
"True",
",",
"frame_stack",
"=",
"4",
")",
":",
"env",
"=",
"WarpFrame",
"(",
"env",
")",
"env",
"=",
"ClipRewardEnv",
"(",
"env",
")",
"if",
"frame_stack",
">",
"1",
":",
"env",
"=",
"FrameStack",
"(",
"env",
",",
"frame_stack",
")",
"if",
"scale",
":",
"env",
"=",
"ScaledFloatFrame",
"(",
"env",
")",
"return",
"env"
] | Configure environment for retro games, using config similar to DeepMind-style Atari in wrap_deepmind | [
"Configure",
"environment",
"for",
"retro",
"games",
"using",
"config",
"similar",
"to",
"DeepMind",
"-",
"style",
"Atari",
"in",
"wrap_deepmind"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/retro_wrappers.py#L212-L222 | valid |
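A hedged usage sketch; it assumes the gym-retro package is installed and uses the Airstriker ROM that ships with it:

import retro
from baselines.common.retro_wrappers import wrap_deepmind_retro

env = retro.make(game='Airstriker-Genesis')
env = wrap_deepmind_retro(env, scale=True, frame_stack=4)
obs = env.reset()
print(obs.shape)   # (84, 84, 4): warped to 84x84 grayscale, 4 frames stacked, scaled to [0, 1]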
openai/baselines | baselines/deepq/build_graph.py | scope_vars | def scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
"""
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,
scope=scope if isinstance(scope, str) else scope.name
) | python | def scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
"""
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,
scope=scope if isinstance(scope, str) else scope.name
) | [
"def",
"scope_vars",
"(",
"scope",
",",
"trainable_only",
"=",
"False",
")",
":",
"return",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"TRAINABLE_VARIABLES",
"if",
"trainable_only",
"else",
"tf",
".",
"GraphKeys",
".",
"GLOBAL_VARIABLES",
",",
"scope",
"=",
"scope",
"if",
"isinstance",
"(",
"scope",
",",
"str",
")",
"else",
"scope",
".",
"name",
")"
] | Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`. | [
"Get",
"variables",
"inside",
"a",
"scope",
"The",
"scope",
"can",
"be",
"specified",
"as",
"a",
"string",
"Parameters",
"----------",
"scope",
":",
"str",
"or",
"VariableScope",
"scope",
"in",
"which",
"the",
"variables",
"reside",
".",
"trainable_only",
":",
"bool",
"whether",
"or",
"not",
"to",
"return",
"only",
"the",
"variables",
"that",
"were",
"marked",
"as",
"trainable",
".",
"Returns",
"-------",
"vars",
":",
"[",
"tf",
".",
"Variable",
"]",
"list",
"of",
"variables",
"in",
"scope",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/build_graph.py#L100-L118 | valid |
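A TF1-style sketch (graph mode assumed); the scope and variable names below are made up:

import tensorflow as tf
from baselines.deepq.build_graph import scope_vars

with tf.variable_scope("mlp"):
    w = tf.get_variable("w", shape=(4, 2))
    b = tf.get_variable("b", shape=(2,), trainable=False)

print([v.name for v in scope_vars("mlp")])                       # ['mlp/w:0', 'mlp/b:0']
print([v.name for v in scope_vars("mlp", trainable_only=True)])  # ['mlp/w:0']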
openai/baselines | baselines/deepq/build_graph.py | build_act | def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None):
"""Creates the act function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select an action given an observation.
` See the top of the file for details.
"""
with tf.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True},
updates=[update_eps_expr])
def act(ob, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps)
return act | python | def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None):
"""Creates the act function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that take a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select an action given an observation.
` See the top of the file for details.
"""
with tf.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True},
updates=[update_eps_expr])
def act(ob, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps)
return act | [
"def",
"build_act",
"(",
"make_obs_ph",
",",
"q_func",
",",
"num_actions",
",",
"scope",
"=",
"\"deepq\"",
",",
"reuse",
"=",
"None",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"scope",
",",
"reuse",
"=",
"reuse",
")",
":",
"observations_ph",
"=",
"make_obs_ph",
"(",
"\"observation\"",
")",
"stochastic_ph",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"bool",
",",
"(",
")",
",",
"name",
"=",
"\"stochastic\"",
")",
"update_eps_ph",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"(",
")",
",",
"name",
"=",
"\"update_eps\"",
")",
"eps",
"=",
"tf",
".",
"get_variable",
"(",
"\"eps\"",
",",
"(",
")",
",",
"initializer",
"=",
"tf",
".",
"constant_initializer",
"(",
"0",
")",
")",
"q_values",
"=",
"q_func",
"(",
"observations_ph",
".",
"get",
"(",
")",
",",
"num_actions",
",",
"scope",
"=",
"\"q_func\"",
")",
"deterministic_actions",
"=",
"tf",
".",
"argmax",
"(",
"q_values",
",",
"axis",
"=",
"1",
")",
"batch_size",
"=",
"tf",
".",
"shape",
"(",
"observations_ph",
".",
"get",
"(",
")",
")",
"[",
"0",
"]",
"random_actions",
"=",
"tf",
".",
"random_uniform",
"(",
"tf",
".",
"stack",
"(",
"[",
"batch_size",
"]",
")",
",",
"minval",
"=",
"0",
",",
"maxval",
"=",
"num_actions",
",",
"dtype",
"=",
"tf",
".",
"int64",
")",
"chose_random",
"=",
"tf",
".",
"random_uniform",
"(",
"tf",
".",
"stack",
"(",
"[",
"batch_size",
"]",
")",
",",
"minval",
"=",
"0",
",",
"maxval",
"=",
"1",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"<",
"eps",
"stochastic_actions",
"=",
"tf",
".",
"where",
"(",
"chose_random",
",",
"random_actions",
",",
"deterministic_actions",
")",
"output_actions",
"=",
"tf",
".",
"cond",
"(",
"stochastic_ph",
",",
"lambda",
":",
"stochastic_actions",
",",
"lambda",
":",
"deterministic_actions",
")",
"update_eps_expr",
"=",
"eps",
".",
"assign",
"(",
"tf",
".",
"cond",
"(",
"update_eps_ph",
">=",
"0",
",",
"lambda",
":",
"update_eps_ph",
",",
"lambda",
":",
"eps",
")",
")",
"_act",
"=",
"U",
".",
"function",
"(",
"inputs",
"=",
"[",
"observations_ph",
",",
"stochastic_ph",
",",
"update_eps_ph",
"]",
",",
"outputs",
"=",
"output_actions",
",",
"givens",
"=",
"{",
"update_eps_ph",
":",
"-",
"1.0",
",",
"stochastic_ph",
":",
"True",
"}",
",",
"updates",
"=",
"[",
"update_eps_expr",
"]",
")",
"def",
"act",
"(",
"ob",
",",
"stochastic",
"=",
"True",
",",
"update_eps",
"=",
"-",
"1",
")",
":",
"return",
"_act",
"(",
"ob",
",",
"stochastic",
",",
"update_eps",
")",
"return",
"act"
] | Creates the act function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select an action given an observation.
` See the top of the file for details. | [
"Creates",
"the",
"act",
"function",
":"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/build_graph.py#L146-L199 | valid |
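A hedged end-to-end sketch of wiring build_act for a small discrete-action environment (TF1 graph mode and the older gym reset API assumed; the two-layer q-network is only illustrative):

import gym
import tensorflow as tf
import baselines.common.tf_util as U
from baselines.deepq.utils import ObservationInput
from baselines.deepq.build_graph import build_act

env = gym.make("CartPole-v0")

def q_func(obs_in, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        h = tf.layers.dense(obs_in, 64, activation=tf.nn.tanh)
        return tf.layers.dense(h, num_actions)

act = build_act(lambda name: ObservationInput(env.observation_space, name=name),
                q_func, env.action_space.n)

with U.make_session():
    U.initialize()
    obs = env.reset()
    action = act(obs[None], update_eps=0.1)[0]   # epsilon-greedy action for a single observation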
openai/baselines | baselines/deepq/build_graph.py | build_act_with_param_noise | def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, param_noise_filter_func=None):
"""Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
function to select an action given an observation.
` See the top of the file for details.
"""
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
with tf.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)
# Unmodified Q.
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
# Perturbable Q used for the actual rollout.
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")
# We have to wrap this code into a function due to the way tf.cond() works. See
# https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for
# a more detailed discussion.
def perturb_vars(original_scope, perturbed_scope):
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale))
else:
# Do not perturb, just assign.
op = tf.assign(perturbed_var, var)
perturb_ops.append(op)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,
lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))
# Put everything together.
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
update_param_noise_threshold_expr,
]
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},
updates=updates)
def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)
return act | python | def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, param_noise_filter_func=None):
"""Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that take a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
function to select an action given an observation.
` See the top of the file for details.
"""
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
with tf.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)
# Unmodified Q.
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
# Perturbable Q used for the actual rollout.
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")
# We have to wrap this code into a function due to the way tf.cond() works. See
# https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for
# a more detailed discussion.
def perturb_vars(original_scope, perturbed_scope):
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale))
else:
# Do not perturb, just assign.
op = tf.assign(perturbed_var, var)
perturb_ops.append(op)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,
lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))
# Put everything together.
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
update_param_noise_threshold_expr,
]
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},
updates=updates)
def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)
return act | [
"def",
"build_act_with_param_noise",
"(",
"make_obs_ph",
",",
"q_func",
",",
"num_actions",
",",
"scope",
"=",
"\"deepq\"",
",",
"reuse",
"=",
"None",
",",
"param_noise_filter_func",
"=",
"None",
")",
":",
"if",
"param_noise_filter_func",
"is",
"None",
":",
"param_noise_filter_func",
"=",
"default_param_noise_filter",
"with",
"tf",
".",
"variable_scope",
"(",
"scope",
",",
"reuse",
"=",
"reuse",
")",
":",
"observations_ph",
"=",
"make_obs_ph",
"(",
"\"observation\"",
")",
"stochastic_ph",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"bool",
",",
"(",
")",
",",
"name",
"=",
"\"stochastic\"",
")",
"update_eps_ph",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"(",
")",
",",
"name",
"=",
"\"update_eps\"",
")",
"update_param_noise_threshold_ph",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"(",
")",
",",
"name",
"=",
"\"update_param_noise_threshold\"",
")",
"update_param_noise_scale_ph",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"bool",
",",
"(",
")",
",",
"name",
"=",
"\"update_param_noise_scale\"",
")",
"reset_ph",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"bool",
",",
"(",
")",
",",
"name",
"=",
"\"reset\"",
")",
"eps",
"=",
"tf",
".",
"get_variable",
"(",
"\"eps\"",
",",
"(",
")",
",",
"initializer",
"=",
"tf",
".",
"constant_initializer",
"(",
"0",
")",
")",
"param_noise_scale",
"=",
"tf",
".",
"get_variable",
"(",
"\"param_noise_scale\"",
",",
"(",
")",
",",
"initializer",
"=",
"tf",
".",
"constant_initializer",
"(",
"0.01",
")",
",",
"trainable",
"=",
"False",
")",
"param_noise_threshold",
"=",
"tf",
".",
"get_variable",
"(",
"\"param_noise_threshold\"",
",",
"(",
")",
",",
"initializer",
"=",
"tf",
".",
"constant_initializer",
"(",
"0.05",
")",
",",
"trainable",
"=",
"False",
")",
"# Unmodified Q.",
"q_values",
"=",
"q_func",
"(",
"observations_ph",
".",
"get",
"(",
")",
",",
"num_actions",
",",
"scope",
"=",
"\"q_func\"",
")",
"# Perturbable Q used for the actual rollout.",
"q_values_perturbed",
"=",
"q_func",
"(",
"observations_ph",
".",
"get",
"(",
")",
",",
"num_actions",
",",
"scope",
"=",
"\"perturbed_q_func\"",
")",
"# We have to wrap this code into a function due to the way tf.cond() works. See",
"# https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for",
"# a more detailed discussion.",
"def",
"perturb_vars",
"(",
"original_scope",
",",
"perturbed_scope",
")",
":",
"all_vars",
"=",
"scope_vars",
"(",
"absolute_scope_name",
"(",
"original_scope",
")",
")",
"all_perturbed_vars",
"=",
"scope_vars",
"(",
"absolute_scope_name",
"(",
"perturbed_scope",
")",
")",
"assert",
"len",
"(",
"all_vars",
")",
"==",
"len",
"(",
"all_perturbed_vars",
")",
"perturb_ops",
"=",
"[",
"]",
"for",
"var",
",",
"perturbed_var",
"in",
"zip",
"(",
"all_vars",
",",
"all_perturbed_vars",
")",
":",
"if",
"param_noise_filter_func",
"(",
"perturbed_var",
")",
":",
"# Perturb this variable.",
"op",
"=",
"tf",
".",
"assign",
"(",
"perturbed_var",
",",
"var",
"+",
"tf",
".",
"random_normal",
"(",
"shape",
"=",
"tf",
".",
"shape",
"(",
"var",
")",
",",
"mean",
"=",
"0.",
",",
"stddev",
"=",
"param_noise_scale",
")",
")",
"else",
":",
"# Do not perturb, just assign.",
"op",
"=",
"tf",
".",
"assign",
"(",
"perturbed_var",
",",
"var",
")",
"perturb_ops",
".",
"append",
"(",
"op",
")",
"assert",
"len",
"(",
"perturb_ops",
")",
"==",
"len",
"(",
"all_vars",
")",
"return",
"tf",
".",
"group",
"(",
"*",
"perturb_ops",
")",
"# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy",
"# of the network and measures the effect of that perturbation in action space. If the perturbation",
"# is too big, reduce scale of perturbation, otherwise increase.",
"q_values_adaptive",
"=",
"q_func",
"(",
"observations_ph",
".",
"get",
"(",
")",
",",
"num_actions",
",",
"scope",
"=",
"\"adaptive_q_func\"",
")",
"perturb_for_adaption",
"=",
"perturb_vars",
"(",
"original_scope",
"=",
"\"q_func\"",
",",
"perturbed_scope",
"=",
"\"adaptive_q_func\"",
")",
"kl",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"nn",
".",
"softmax",
"(",
"q_values",
")",
"*",
"(",
"tf",
".",
"log",
"(",
"tf",
".",
"nn",
".",
"softmax",
"(",
"q_values",
")",
")",
"-",
"tf",
".",
"log",
"(",
"tf",
".",
"nn",
".",
"softmax",
"(",
"q_values_adaptive",
")",
")",
")",
",",
"axis",
"=",
"-",
"1",
")",
"mean_kl",
"=",
"tf",
".",
"reduce_mean",
"(",
"kl",
")",
"def",
"update_scale",
"(",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"perturb_for_adaption",
"]",
")",
":",
"update_scale_expr",
"=",
"tf",
".",
"cond",
"(",
"mean_kl",
"<",
"param_noise_threshold",
",",
"lambda",
":",
"param_noise_scale",
".",
"assign",
"(",
"param_noise_scale",
"*",
"1.01",
")",
",",
"lambda",
":",
"param_noise_scale",
".",
"assign",
"(",
"param_noise_scale",
"/",
"1.01",
")",
",",
")",
"return",
"update_scale_expr",
"# Functionality to update the threshold for parameter space noise.",
"update_param_noise_threshold_expr",
"=",
"param_noise_threshold",
".",
"assign",
"(",
"tf",
".",
"cond",
"(",
"update_param_noise_threshold_ph",
">=",
"0",
",",
"lambda",
":",
"update_param_noise_threshold_ph",
",",
"lambda",
":",
"param_noise_threshold",
")",
")",
"# Put everything together.",
"deterministic_actions",
"=",
"tf",
".",
"argmax",
"(",
"q_values_perturbed",
",",
"axis",
"=",
"1",
")",
"batch_size",
"=",
"tf",
".",
"shape",
"(",
"observations_ph",
".",
"get",
"(",
")",
")",
"[",
"0",
"]",
"random_actions",
"=",
"tf",
".",
"random_uniform",
"(",
"tf",
".",
"stack",
"(",
"[",
"batch_size",
"]",
")",
",",
"minval",
"=",
"0",
",",
"maxval",
"=",
"num_actions",
",",
"dtype",
"=",
"tf",
".",
"int64",
")",
"chose_random",
"=",
"tf",
".",
"random_uniform",
"(",
"tf",
".",
"stack",
"(",
"[",
"batch_size",
"]",
")",
",",
"minval",
"=",
"0",
",",
"maxval",
"=",
"1",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"<",
"eps",
"stochastic_actions",
"=",
"tf",
".",
"where",
"(",
"chose_random",
",",
"random_actions",
",",
"deterministic_actions",
")",
"output_actions",
"=",
"tf",
".",
"cond",
"(",
"stochastic_ph",
",",
"lambda",
":",
"stochastic_actions",
",",
"lambda",
":",
"deterministic_actions",
")",
"update_eps_expr",
"=",
"eps",
".",
"assign",
"(",
"tf",
".",
"cond",
"(",
"update_eps_ph",
">=",
"0",
",",
"lambda",
":",
"update_eps_ph",
",",
"lambda",
":",
"eps",
")",
")",
"updates",
"=",
"[",
"update_eps_expr",
",",
"tf",
".",
"cond",
"(",
"reset_ph",
",",
"lambda",
":",
"perturb_vars",
"(",
"original_scope",
"=",
"\"q_func\"",
",",
"perturbed_scope",
"=",
"\"perturbed_q_func\"",
")",
",",
"lambda",
":",
"tf",
".",
"group",
"(",
"*",
"[",
"]",
")",
")",
",",
"tf",
".",
"cond",
"(",
"update_param_noise_scale_ph",
",",
"lambda",
":",
"update_scale",
"(",
")",
",",
"lambda",
":",
"tf",
".",
"Variable",
"(",
"0.",
",",
"trainable",
"=",
"False",
")",
")",
",",
"update_param_noise_threshold_expr",
",",
"]",
"_act",
"=",
"U",
".",
"function",
"(",
"inputs",
"=",
"[",
"observations_ph",
",",
"stochastic_ph",
",",
"update_eps_ph",
",",
"reset_ph",
",",
"update_param_noise_threshold_ph",
",",
"update_param_noise_scale_ph",
"]",
",",
"outputs",
"=",
"output_actions",
",",
"givens",
"=",
"{",
"update_eps_ph",
":",
"-",
"1.0",
",",
"stochastic_ph",
":",
"True",
",",
"reset_ph",
":",
"False",
",",
"update_param_noise_threshold_ph",
":",
"False",
",",
"update_param_noise_scale_ph",
":",
"False",
"}",
",",
"updates",
"=",
"updates",
")",
"def",
"act",
"(",
"ob",
",",
"reset",
"=",
"False",
",",
"update_param_noise_threshold",
"=",
"False",
",",
"update_param_noise_scale",
"=",
"False",
",",
"stochastic",
"=",
"True",
",",
"update_eps",
"=",
"-",
"1",
")",
":",
"return",
"_act",
"(",
"ob",
",",
"stochastic",
",",
"update_eps",
",",
"reset",
",",
"update_param_noise_threshold",
",",
"update_param_noise_scale",
")",
"return",
"act"
] | Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
function to select an action given an observation.
` See the top of the file for details. | [
"Creates",
"the",
"act",
"function",
"with",
"support",
"for",
"parameter",
"space",
"noise",
"exploration",
"(",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1706",
".",
"01905",
")",
":"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/build_graph.py#L202-L314 | valid |
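A hedged sketch for the parameter-noise variant (TF1 graph mode and the older gym API assumed). The threshold formula below mirrors the one used in baselines.deepq.deepq when param_noise=True; the q-network and input wrapper are the same illustrative ones as in the build_act sketch above:

import gym
import numpy as np
import tensorflow as tf
import baselines.common.tf_util as U
from baselines.deepq.utils import ObservationInput
from baselines.deepq.build_graph import build_act_with_param_noise

env = gym.make("CartPole-v0")

def q_func(obs_in, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        h = tf.layers.dense(obs_in, 64, activation=tf.nn.tanh)
        return tf.layers.dense(h, num_actions)

act = build_act_with_param_noise(
    lambda name: ObservationInput(env.observation_space, name=name),
    q_func, env.action_space.n)

with U.make_session():
    U.initialize()
    eps = 0.1
    threshold = -np.log(1. - eps + eps / float(env.action_space.n))
    action = act(env.reset()[None],
                 reset=True,                              # re-perturb the rollout copy of the network
                 update_param_noise_threshold=threshold,
                 update_param_noise_scale=True,
                 update_eps=0.0)[0]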
openai/baselines | baselines/deepq/build_graph.py | build_train | def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,
double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None):
"""Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select an action given an observation.
` See the top of the file for details.
train: (object, np.array, np.array, object, np.array, np.array) -> np.array
optimize the error in Bellman's equation.
` See the top of the file for details.
update_target: () -> ()
copy the parameters from optimized Q function to the target Q function.
` See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
"""
if param_noise:
act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,
param_noise_filter_func=param_noise_filter_func)
else:
act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)
with tf.variable_scope(scope, reuse=reuse):
# set up placeholders
obs_t_input = make_obs_ph("obs_t")
act_t_ph = tf.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
obs_tp1_input = make_obs_ph("obs_tp1")
done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")
# q network evaluation
q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/q_func")
# target q network evaluation
q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func")
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/target_q_func")
# q scores for actions which we know were selected in the given state.
q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)
# compute estimate of best possible value starting from state at t + 1
if double_q:
q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True)
q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)
else:
q_tp1_best = tf.reduce_max(q_tp1, 1)
q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked
# compute the error (potentially clipped)
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = U.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
# compute optimization op (potentially with gradient clipping)
if grad_norm_clipping is not None:
gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
optimize_expr = optimizer.apply_gradients(gradients)
else:
optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_expr = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(var_target.assign(var))
update_target_expr = tf.group(*update_target_expr)
# Create callable functions
train = U.function(
inputs=[
obs_t_input,
act_t_ph,
rew_t_ph,
obs_tp1_input,
done_mask_ph,
importance_weights_ph
],
outputs=td_error,
updates=[optimize_expr]
)
update_target = U.function([], [], updates=[update_target_expr])
q_values = U.function([obs_t_input], q_t)
return act_f, train, update_target, {'q_values': q_values} | python | def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,
double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None):
"""Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select an action given an observation.
` See the top of the file for details.
train: (object, np.array, np.array, object, np.array, np.array) -> np.array
optimize the error in Bellman's equation.
` See the top of the file for details.
update_target: () -> ()
copy the parameters from optimized Q function to the target Q function.
` See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
"""
if param_noise:
act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,
param_noise_filter_func=param_noise_filter_func)
else:
act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)
with tf.variable_scope(scope, reuse=reuse):
# set up placeholders
obs_t_input = make_obs_ph("obs_t")
act_t_ph = tf.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
obs_tp1_input = make_obs_ph("obs_tp1")
done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")
# q network evaluation
q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/q_func")
# target q network evaluation
q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func")
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/target_q_func")
# q scores for actions which we know were selected in the given state.
q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)
# compute estimate of best possible value starting from state at t + 1
if double_q:
q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True)
q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)
else:
q_tp1_best = tf.reduce_max(q_tp1, 1)
q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked
# compute the error (potentially clipped)
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = U.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
# compute optimization op (potentially with gradient clipping)
if grad_norm_clipping is not None:
gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
optimize_expr = optimizer.apply_gradients(gradients)
else:
optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_expr = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(var_target.assign(var))
update_target_expr = tf.group(*update_target_expr)
# Create callable functions
train = U.function(
inputs=[
obs_t_input,
act_t_ph,
rew_t_ph,
obs_tp1_input,
done_mask_ph,
importance_weights_ph
],
outputs=td_error,
updates=[optimize_expr]
)
update_target = U.function([], [], updates=[update_target_expr])
q_values = U.function([obs_t_input], q_t)
return act_f, train, update_target, {'q_values': q_values} | [
"def",
"build_train",
"(",
"make_obs_ph",
",",
"q_func",
",",
"num_actions",
",",
"optimizer",
",",
"grad_norm_clipping",
"=",
"None",
",",
"gamma",
"=",
"1.0",
",",
"double_q",
"=",
"True",
",",
"scope",
"=",
"\"deepq\"",
",",
"reuse",
"=",
"None",
",",
"param_noise",
"=",
"False",
",",
"param_noise_filter_func",
"=",
"None",
")",
":",
"if",
"param_noise",
":",
"act_f",
"=",
"build_act_with_param_noise",
"(",
"make_obs_ph",
",",
"q_func",
",",
"num_actions",
",",
"scope",
"=",
"scope",
",",
"reuse",
"=",
"reuse",
",",
"param_noise_filter_func",
"=",
"param_noise_filter_func",
")",
"else",
":",
"act_f",
"=",
"build_act",
"(",
"make_obs_ph",
",",
"q_func",
",",
"num_actions",
",",
"scope",
"=",
"scope",
",",
"reuse",
"=",
"reuse",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"scope",
",",
"reuse",
"=",
"reuse",
")",
":",
"# set up placeholders",
"obs_t_input",
"=",
"make_obs_ph",
"(",
"\"obs_t\"",
")",
"act_t_ph",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"int32",
",",
"[",
"None",
"]",
",",
"name",
"=",
"\"action\"",
")",
"rew_t_ph",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"[",
"None",
"]",
",",
"name",
"=",
"\"reward\"",
")",
"obs_tp1_input",
"=",
"make_obs_ph",
"(",
"\"obs_tp1\"",
")",
"done_mask_ph",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"[",
"None",
"]",
",",
"name",
"=",
"\"done\"",
")",
"importance_weights_ph",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"[",
"None",
"]",
",",
"name",
"=",
"\"weight\"",
")",
"# q network evaluation",
"q_t",
"=",
"q_func",
"(",
"obs_t_input",
".",
"get",
"(",
")",
",",
"num_actions",
",",
"scope",
"=",
"\"q_func\"",
",",
"reuse",
"=",
"True",
")",
"# reuse parameters from act",
"q_func_vars",
"=",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"GLOBAL_VARIABLES",
",",
"scope",
"=",
"tf",
".",
"get_variable_scope",
"(",
")",
".",
"name",
"+",
"\"/q_func\"",
")",
"# target q network evalution",
"q_tp1",
"=",
"q_func",
"(",
"obs_tp1_input",
".",
"get",
"(",
")",
",",
"num_actions",
",",
"scope",
"=",
"\"target_q_func\"",
")",
"target_q_func_vars",
"=",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"GLOBAL_VARIABLES",
",",
"scope",
"=",
"tf",
".",
"get_variable_scope",
"(",
")",
".",
"name",
"+",
"\"/target_q_func\"",
")",
"# q scores for actions which we know were selected in the given state.",
"q_t_selected",
"=",
"tf",
".",
"reduce_sum",
"(",
"q_t",
"*",
"tf",
".",
"one_hot",
"(",
"act_t_ph",
",",
"num_actions",
")",
",",
"1",
")",
"# compute estimate of best possible value starting from state at t + 1",
"if",
"double_q",
":",
"q_tp1_using_online_net",
"=",
"q_func",
"(",
"obs_tp1_input",
".",
"get",
"(",
")",
",",
"num_actions",
",",
"scope",
"=",
"\"q_func\"",
",",
"reuse",
"=",
"True",
")",
"q_tp1_best_using_online_net",
"=",
"tf",
".",
"argmax",
"(",
"q_tp1_using_online_net",
",",
"1",
")",
"q_tp1_best",
"=",
"tf",
".",
"reduce_sum",
"(",
"q_tp1",
"*",
"tf",
".",
"one_hot",
"(",
"q_tp1_best_using_online_net",
",",
"num_actions",
")",
",",
"1",
")",
"else",
":",
"q_tp1_best",
"=",
"tf",
".",
"reduce_max",
"(",
"q_tp1",
",",
"1",
")",
"q_tp1_best_masked",
"=",
"(",
"1.0",
"-",
"done_mask_ph",
")",
"*",
"q_tp1_best",
"# compute RHS of bellman equation",
"q_t_selected_target",
"=",
"rew_t_ph",
"+",
"gamma",
"*",
"q_tp1_best_masked",
"# compute the error (potentially clipped)",
"td_error",
"=",
"q_t_selected",
"-",
"tf",
".",
"stop_gradient",
"(",
"q_t_selected_target",
")",
"errors",
"=",
"U",
".",
"huber_loss",
"(",
"td_error",
")",
"weighted_error",
"=",
"tf",
".",
"reduce_mean",
"(",
"importance_weights_ph",
"*",
"errors",
")",
"# compute optimization op (potentially with gradient clipping)",
"if",
"grad_norm_clipping",
"is",
"not",
"None",
":",
"gradients",
"=",
"optimizer",
".",
"compute_gradients",
"(",
"weighted_error",
",",
"var_list",
"=",
"q_func_vars",
")",
"for",
"i",
",",
"(",
"grad",
",",
"var",
")",
"in",
"enumerate",
"(",
"gradients",
")",
":",
"if",
"grad",
"is",
"not",
"None",
":",
"gradients",
"[",
"i",
"]",
"=",
"(",
"tf",
".",
"clip_by_norm",
"(",
"grad",
",",
"grad_norm_clipping",
")",
",",
"var",
")",
"optimize_expr",
"=",
"optimizer",
".",
"apply_gradients",
"(",
"gradients",
")",
"else",
":",
"optimize_expr",
"=",
"optimizer",
".",
"minimize",
"(",
"weighted_error",
",",
"var_list",
"=",
"q_func_vars",
")",
"# update_target_fn will be called periodically to copy Q network to target Q network",
"update_target_expr",
"=",
"[",
"]",
"for",
"var",
",",
"var_target",
"in",
"zip",
"(",
"sorted",
"(",
"q_func_vars",
",",
"key",
"=",
"lambda",
"v",
":",
"v",
".",
"name",
")",
",",
"sorted",
"(",
"target_q_func_vars",
",",
"key",
"=",
"lambda",
"v",
":",
"v",
".",
"name",
")",
")",
":",
"update_target_expr",
".",
"append",
"(",
"var_target",
".",
"assign",
"(",
"var",
")",
")",
"update_target_expr",
"=",
"tf",
".",
"group",
"(",
"*",
"update_target_expr",
")",
"# Create callable functions",
"train",
"=",
"U",
".",
"function",
"(",
"inputs",
"=",
"[",
"obs_t_input",
",",
"act_t_ph",
",",
"rew_t_ph",
",",
"obs_tp1_input",
",",
"done_mask_ph",
",",
"importance_weights_ph",
"]",
",",
"outputs",
"=",
"td_error",
",",
"updates",
"=",
"[",
"optimize_expr",
"]",
")",
"update_target",
"=",
"U",
".",
"function",
"(",
"[",
"]",
",",
"[",
"]",
",",
"updates",
"=",
"[",
"update_target_expr",
"]",
")",
"q_values",
"=",
"U",
".",
"function",
"(",
"[",
"obs_t_input",
"]",
",",
"q_t",
")",
"return",
"act_f",
",",
"train",
",",
"update_target",
",",
"{",
"'q_values'",
":",
"q_values",
"}"
] | Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse them, the scope must be given.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select an action given observation.
See the top of the file for details.
train: (object, np.array, np.array, object, np.array, np.array) -> np.array
optimize the error in Bellman's equation.
See the top of the file for details.
update_target: () -> ()
copy the parameters from optimized Q function to the target Q function.
See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values. | [
"Creates",
"the",
"train",
"function",
":"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/build_graph.py#L317-L449 | valid |
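Note on the build_train record above: the returned act/train/update_target callables are meant to be used together. A minimal, hedged usage sketch follows; the CartPole environment, the inline q_func, and the ObservationInput import path are illustrative assumptions, not taken from the record itself.

    import gym
    import tensorflow as tf
    from baselines.deepq.build_graph import build_train
    from baselines.deepq.utils import ObservationInput   # assumed helper wrapping an observation space into a TfInput

    def q_func(obs, num_actions, scope, reuse=False):
        # any callable with this signature works; a tiny MLP is enough for the sketch
        with tf.variable_scope(scope, reuse=reuse):
            h = tf.layers.dense(obs, 64, activation=tf.nn.tanh)
            return tf.layers.dense(h, num_actions, activation=None)

    env = gym.make("CartPole-v0")
    act, train, update_target, debug = build_train(
        make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name),
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
        gamma=0.99,
    )
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    update_target()   # copy the freshly initialized online Q weights into the target network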
openai/baselines | baselines/common/running_mean_std.py | profile_tf_runningmeanstd | def profile_tf_runningmeanstd():
import time
from baselines.common import tf_util
tf_util.get_session( config=tf.ConfigProto(
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1,
allow_soft_placement=True
))
x = np.random.random((376,))
n_trials = 10000
rms = RunningMeanStd()
tfrms = TfRunningMeanStd()
tic1 = time.time()
for _ in range(n_trials):
rms.update(x)
tic2 = time.time()
for _ in range(n_trials):
tfrms.update(x)
tic3 = time.time()
print('rms update time ({} trials): {} s'.format(n_trials, tic2 - tic1))
print('tfrms update time ({} trials): {} s'.format(n_trials, tic3 - tic2))
tic1 = time.time()
for _ in range(n_trials):
z1 = rms.mean
tic2 = time.time()
for _ in range(n_trials):
z2 = tfrms.mean
assert z1 == z2
tic3 = time.time()
print('rms get mean time ({} trials): {} s'.format(n_trials, tic2 - tic1))
print('tfrms get mean time ({} trials): {} s'.format(n_trials, tic3 - tic2))
'''
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) #pylint: disable=E1101
run_metadata = tf.RunMetadata()
profile_opts = dict(options=options, run_metadata=run_metadata)
from tensorflow.python.client import timeline
fetched_timeline = timeline.Timeline(run_metadata.step_stats) #pylint: disable=E1101
chrome_trace = fetched_timeline.generate_chrome_trace_format()
outfile = '/tmp/timeline.json'
with open(outfile, 'wt') as f:
f.write(chrome_trace)
print('Successfully saved profile to {}. Exiting.'.format(outfile))
exit(0)
''' | python | def profile_tf_runningmeanstd():
import time
from baselines.common import tf_util
tf_util.get_session( config=tf.ConfigProto(
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1,
allow_soft_placement=True
))
x = np.random.random((376,))
n_trials = 10000
rms = RunningMeanStd()
tfrms = TfRunningMeanStd()
tic1 = time.time()
for _ in range(n_trials):
rms.update(x)
tic2 = time.time()
for _ in range(n_trials):
tfrms.update(x)
tic3 = time.time()
print('rms update time ({} trials): {} s'.format(n_trials, tic2 - tic1))
print('tfrms update time ({} trials): {} s'.format(n_trials, tic3 - tic2))
tic1 = time.time()
for _ in range(n_trials):
z1 = rms.mean
tic2 = time.time()
for _ in range(n_trials):
z2 = tfrms.mean
assert z1 == z2
tic3 = time.time()
print('rms get mean time ({} trials): {} s'.format(n_trials, tic2 - tic1))
print('tfrms get mean time ({} trials): {} s'.format(n_trials, tic3 - tic2))
'''
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) #pylint: disable=E1101
run_metadata = tf.RunMetadata()
profile_opts = dict(options=options, run_metadata=run_metadata)
from tensorflow.python.client import timeline
fetched_timeline = timeline.Timeline(run_metadata.step_stats) #pylint: disable=E1101
chrome_trace = fetched_timeline.generate_chrome_trace_format()
outfile = '/tmp/timeline.json'
with open(outfile, 'wt') as f:
f.write(chrome_trace)
print('Successfully saved profile to {}. Exiting.'.format(outfile))
exit(0)
''' | [
"def",
"profile_tf_runningmeanstd",
"(",
")",
":",
"import",
"time",
"from",
"baselines",
".",
"common",
"import",
"tf_util",
"tf_util",
".",
"get_session",
"(",
"config",
"=",
"tf",
".",
"ConfigProto",
"(",
"inter_op_parallelism_threads",
"=",
"1",
",",
"intra_op_parallelism_threads",
"=",
"1",
",",
"allow_soft_placement",
"=",
"True",
")",
")",
"x",
"=",
"np",
".",
"random",
".",
"random",
"(",
"(",
"376",
",",
")",
")",
"n_trials",
"=",
"10000",
"rms",
"=",
"RunningMeanStd",
"(",
")",
"tfrms",
"=",
"TfRunningMeanStd",
"(",
")",
"tic1",
"=",
"time",
".",
"time",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"n_trials",
")",
":",
"rms",
".",
"update",
"(",
"x",
")",
"tic2",
"=",
"time",
".",
"time",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"n_trials",
")",
":",
"tfrms",
".",
"update",
"(",
"x",
")",
"tic3",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"'rms update time ({} trials): {} s'",
".",
"format",
"(",
"n_trials",
",",
"tic2",
"-",
"tic1",
")",
")",
"print",
"(",
"'tfrms update time ({} trials): {} s'",
".",
"format",
"(",
"n_trials",
",",
"tic3",
"-",
"tic2",
")",
")",
"tic1",
"=",
"time",
".",
"time",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"n_trials",
")",
":",
"z1",
"=",
"rms",
".",
"mean",
"tic2",
"=",
"time",
".",
"time",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"n_trials",
")",
":",
"z2",
"=",
"tfrms",
".",
"mean",
"assert",
"z1",
"==",
"z2",
"tic3",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"'rms get mean time ({} trials): {} s'",
".",
"format",
"(",
"n_trials",
",",
"tic2",
"-",
"tic1",
")",
")",
"print",
"(",
"'tfrms get mean time ({} trials): {} s'",
".",
"format",
"(",
"n_trials",
",",
"tic3",
"-",
"tic2",
")",
")"
] | options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) #pylint: disable=E1101
run_metadata = tf.RunMetadata()
profile_opts = dict(options=options, run_metadata=run_metadata)
from tensorflow.python.client import timeline
fetched_timeline = timeline.Timeline(run_metadata.step_stats) #pylint: disable=E1101
chrome_trace = fetched_timeline.generate_chrome_trace_format()
outfile = '/tmp/timeline.json'
with open(outfile, 'wt') as f:
f.write(chrome_trace)
print('Successfully saved profile to {}. Exiting.'.format(outfile))
exit(0) | [
"options",
"=",
"tf",
".",
"RunOptions",
"(",
"trace_level",
"=",
"tf",
".",
"RunOptions",
".",
"FULL_TRACE",
")",
"#pylint",
":",
"disable",
"=",
"E1101",
"run_metadata",
"=",
"tf",
".",
"RunMetadata",
"()",
"profile_opts",
"=",
"dict",
"(",
"options",
"=",
"options",
"run_metadata",
"=",
"run_metadata",
")"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/running_mean_std.py#L120-L182 | valid |
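Note on the profiling record above: it only times RunningMeanStd against TfRunningMeanStd. For readers who want the underlying update, here is a hedged NumPy sketch of the standard parallel moment-combining step (Chan et al.) that such a running-statistics class performs; the function name and all sizes are illustrative.

    import numpy as np

    def combine_moments(mean, var, count, batch_mean, batch_var, batch_count):
        # fold a batch's moments into the running moments without storing past data
        delta = batch_mean - mean
        tot = count + batch_count
        new_mean = mean + delta * batch_count / tot
        m2 = var * count + batch_var * batch_count + np.square(delta) * count * batch_count / tot
        return new_mean, m2 / tot, tot

    mean, var, count = np.zeros(8), np.ones(8), 1e-4   # "empty" statistics; tiny count avoids division by zero
    x = np.random.random((64, 8))                      # one batch of observations
    mean, var, count = combine_moments(mean, var, count, x.mean(axis=0), x.var(axis=0), x.shape[0])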
openai/baselines | baselines/her/her_sampler.py | make_sample_her_transitions | def make_sample_her_transitions(replay_strategy, replay_k, reward_fun):
"""Creates a sample function that can be used for HER experience replay.
Args:
replay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none',
regular DDPG experience replay is used
replay_k (int): the ratio between HER replays and regular replays (e.g. k = 4 -> 4 times
as many HER replays as regular replays are used)
reward_fun (function): function to re-compute the reward with substituted goals
"""
if replay_strategy == 'future':
future_p = 1 - (1. / (1 + replay_k))
else: # 'replay_strategy' == 'none'
future_p = 0
def _sample_her_transitions(episode_batch, batch_size_in_transitions):
"""episode_batch is {key: array(buffer_size x T x dim_key)}
"""
T = episode_batch['u'].shape[1]
rollout_batch_size = episode_batch['u'].shape[0]
batch_size = batch_size_in_transitions
# Select which episodes and time steps to use.
episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
t_samples = np.random.randint(T, size=batch_size)
transitions = {key: episode_batch[key][episode_idxs, t_samples].copy()
for key in episode_batch.keys()}
# Select which transitions to relabel with future goals, each with probability future_p.
# These will be used for HER replay by substituting in future goals.
her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)
future_offset = np.random.uniform(size=batch_size) * (T - t_samples)
future_offset = future_offset.astype(int)
future_t = (t_samples + 1 + future_offset)[her_indexes]
# Replace goal with achieved goal but only for the previously-selected
# HER transitions (as defined by her_indexes). For the other transitions,
# keep the original goal.
future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t]
transitions['g'][her_indexes] = future_ag
# Reconstruct info dictionary for reward computation.
info = {}
for key, value in transitions.items():
if key.startswith('info_'):
info[key.replace('info_', '')] = value
# Re-compute reward since we may have substituted the goal.
reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
reward_params['info'] = info
transitions['r'] = reward_fun(**reward_params)
transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:])
for k in transitions.keys()}
assert(transitions['u'].shape[0] == batch_size_in_transitions)
return transitions
return _sample_her_transitions | python | def make_sample_her_transitions(replay_strategy, replay_k, reward_fun):
"""Creates a sample function that can be used for HER experience replay.
Args:
replay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none',
regular DDPG experience replay is used
replay_k (int): the ratio between HER replays and regular replays (e.g. k = 4 -> 4 times
as many HER replays as regular replays are used)
reward_fun (function): function to re-compute the reward with substituted goals
"""
if replay_strategy == 'future':
future_p = 1 - (1. / (1 + replay_k))
else: # 'replay_strategy' == 'none'
future_p = 0
def _sample_her_transitions(episode_batch, batch_size_in_transitions):
"""episode_batch is {key: array(buffer_size x T x dim_key)}
"""
T = episode_batch['u'].shape[1]
rollout_batch_size = episode_batch['u'].shape[0]
batch_size = batch_size_in_transitions
# Select which episodes and time steps to use.
episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
t_samples = np.random.randint(T, size=batch_size)
transitions = {key: episode_batch[key][episode_idxs, t_samples].copy()
for key in episode_batch.keys()}
# Select which transitions to relabel with future goals, each with probability future_p.
# These will be used for HER replay by substituting in future goals.
her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)
future_offset = np.random.uniform(size=batch_size) * (T - t_samples)
future_offset = future_offset.astype(int)
future_t = (t_samples + 1 + future_offset)[her_indexes]
# Replace goal with achieved goal but only for the previously-selected
# HER transitions (as defined by her_indexes). For the other transitions,
# keep the original goal.
future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t]
transitions['g'][her_indexes] = future_ag
# Reconstruct info dictionary for reward computation.
info = {}
for key, value in transitions.items():
if key.startswith('info_'):
info[key.replace('info_', '')] = value
# Re-compute reward since we may have substituted the goal.
reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
reward_params['info'] = info
transitions['r'] = reward_fun(**reward_params)
transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:])
for k in transitions.keys()}
assert(transitions['u'].shape[0] == batch_size_in_transitions)
return transitions
return _sample_her_transitions | [
"def",
"make_sample_her_transitions",
"(",
"replay_strategy",
",",
"replay_k",
",",
"reward_fun",
")",
":",
"if",
"replay_strategy",
"==",
"'future'",
":",
"future_p",
"=",
"1",
"-",
"(",
"1.",
"/",
"(",
"1",
"+",
"replay_k",
")",
")",
"else",
":",
"# 'replay_strategy' == 'none'",
"future_p",
"=",
"0",
"def",
"_sample_her_transitions",
"(",
"episode_batch",
",",
"batch_size_in_transitions",
")",
":",
"\"\"\"episode_batch is {key: array(buffer_size x T x dim_key)}\n \"\"\"",
"T",
"=",
"episode_batch",
"[",
"'u'",
"]",
".",
"shape",
"[",
"1",
"]",
"rollout_batch_size",
"=",
"episode_batch",
"[",
"'u'",
"]",
".",
"shape",
"[",
"0",
"]",
"batch_size",
"=",
"batch_size_in_transitions",
"# Select which episodes and time steps to use.",
"episode_idxs",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"rollout_batch_size",
",",
"batch_size",
")",
"t_samples",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"T",
",",
"size",
"=",
"batch_size",
")",
"transitions",
"=",
"{",
"key",
":",
"episode_batch",
"[",
"key",
"]",
"[",
"episode_idxs",
",",
"t_samples",
"]",
".",
"copy",
"(",
")",
"for",
"key",
"in",
"episode_batch",
".",
"keys",
"(",
")",
"}",
"# Select future time indexes proportional with probability future_p. These",
"# will be used for HER replay by substituting in future goals.",
"her_indexes",
"=",
"np",
".",
"where",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"size",
"=",
"batch_size",
")",
"<",
"future_p",
")",
"future_offset",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"size",
"=",
"batch_size",
")",
"*",
"(",
"T",
"-",
"t_samples",
")",
"future_offset",
"=",
"future_offset",
".",
"astype",
"(",
"int",
")",
"future_t",
"=",
"(",
"t_samples",
"+",
"1",
"+",
"future_offset",
")",
"[",
"her_indexes",
"]",
"# Replace goal with achieved goal but only for the previously-selected",
"# HER transitions (as defined by her_indexes). For the other transitions,",
"# keep the original goal.",
"future_ag",
"=",
"episode_batch",
"[",
"'ag'",
"]",
"[",
"episode_idxs",
"[",
"her_indexes",
"]",
",",
"future_t",
"]",
"transitions",
"[",
"'g'",
"]",
"[",
"her_indexes",
"]",
"=",
"future_ag",
"# Reconstruct info dictionary for reward computation.",
"info",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"transitions",
".",
"items",
"(",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"'info_'",
")",
":",
"info",
"[",
"key",
".",
"replace",
"(",
"'info_'",
",",
"''",
")",
"]",
"=",
"value",
"# Re-compute reward since we may have substituted the goal.",
"reward_params",
"=",
"{",
"k",
":",
"transitions",
"[",
"k",
"]",
"for",
"k",
"in",
"[",
"'ag_2'",
",",
"'g'",
"]",
"}",
"reward_params",
"[",
"'info'",
"]",
"=",
"info",
"transitions",
"[",
"'r'",
"]",
"=",
"reward_fun",
"(",
"*",
"*",
"reward_params",
")",
"transitions",
"=",
"{",
"k",
":",
"transitions",
"[",
"k",
"]",
".",
"reshape",
"(",
"batch_size",
",",
"*",
"transitions",
"[",
"k",
"]",
".",
"shape",
"[",
"1",
":",
"]",
")",
"for",
"k",
"in",
"transitions",
".",
"keys",
"(",
")",
"}",
"assert",
"(",
"transitions",
"[",
"'u'",
"]",
".",
"shape",
"[",
"0",
"]",
"==",
"batch_size_in_transitions",
")",
"return",
"transitions",
"return",
"_sample_her_transitions"
] | Creates a sample function that can be used for HER experience replay.
Args:
replay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none',
regular DDPG experience replay is used
replay_k (int): the ratio between HER replays and regular replays (e.g. k = 4 -> 4 times
as many HER replays as regular replays are used)
reward_fun (function): function to re-compute the reward with substituted goals | [
"Creates",
"a",
"sample",
"function",
"that",
"can",
"be",
"used",
"for",
"HER",
"experience",
"replay",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/her_sampler.py#L4-L63 | valid |
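Note on the make_sample_her_transitions record above: with the 'future' strategy, replay_k fixes the relabeling probability as future_p = 1 - 1/(1 + replay_k), so replay_k=4 gives future_p = 0.8 (roughly four HER-relabeled transitions per regular one). A hedged usage sketch follows; the goal-distance reward and all array sizes are illustrative stand-ins, not the real environment's compute_reward.

    import numpy as np
    from baselines.her.her_sampler import make_sample_her_transitions

    def reward_fun(ag_2, g, info):                      # stand-in: 0 if the achieved goal is close, else -1
        return -(np.linalg.norm(ag_2 - g, axis=-1) > 0.05).astype(np.float32)

    sample_her = make_sample_her_transitions('future', replay_k=4, reward_fun=reward_fun)

    T, n_episodes = 50, 10
    episode_batch = {
        'o':  np.random.randn(n_episodes, T + 1, 10),   # observations carry T+1 steps
        'ag': np.random.randn(n_episodes, T + 1, 3),    # achieved goals carry T+1 steps
        'g':  np.random.randn(n_episodes, T, 3),
        'u':  np.random.randn(n_episodes, T, 4),
    }
    episode_batch['o_2'] = episode_batch['o'][:, 1:, :]
    episode_batch['ag_2'] = episode_batch['ag'][:, 1:, :]

    transitions = sample_her(episode_batch, batch_size_in_transitions=256)
    # transitions['g'] now holds substituted future achieved goals for about 80% of the rows,
    # and transitions['r'] has been recomputed against those goals.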
openai/baselines | baselines/deepq/experiments/custom_cartpole.py | model | def model(inpt, num_actions, scope, reuse=False):
"""This model takes as input an observation and returns values of all actions."""
with tf.variable_scope(scope, reuse=reuse):
out = inpt
out = layers.fully_connected(out, num_outputs=64, activation_fn=tf.nn.tanh)
out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
return out | python | def model(inpt, num_actions, scope, reuse=False):
"""This model takes as input an observation and returns values of all actions."""
with tf.variable_scope(scope, reuse=reuse):
out = inpt
out = layers.fully_connected(out, num_outputs=64, activation_fn=tf.nn.tanh)
out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
return out | [
"def",
"model",
"(",
"inpt",
",",
"num_actions",
",",
"scope",
",",
"reuse",
"=",
"False",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"scope",
",",
"reuse",
"=",
"reuse",
")",
":",
"out",
"=",
"inpt",
"out",
"=",
"layers",
".",
"fully_connected",
"(",
"out",
",",
"num_outputs",
"=",
"64",
",",
"activation_fn",
"=",
"tf",
".",
"nn",
".",
"tanh",
")",
"out",
"=",
"layers",
".",
"fully_connected",
"(",
"out",
",",
"num_outputs",
"=",
"num_actions",
",",
"activation_fn",
"=",
"None",
")",
"return",
"out"
] | This model takes as input an observation and returns values of all actions. | [
"This",
"model",
"takes",
"as",
"input",
"an",
"observation",
"and",
"returns",
"values",
"of",
"all",
"actions",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/experiments/custom_cartpole.py#L16-L22 | valid |
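Note on the model record above: this is exactly the q_func contract that build_train (documented earlier in this file) expects — a callable mapping a batch of observations to one Q-value per action. A hedged sketch of calling it directly; the 4-dimensional CartPole observation and the two actions are illustrative, and the tf.contrib.layers import used by model is assumed to be in scope as in custom_cartpole.py.

    import tensorflow as tf

    obs_ph = tf.placeholder(tf.float32, [None, 4], name="obs")      # batch of CartPole observations
    q_values = model(obs_ph, num_actions=2, scope="deepq/q_func")   # tensor of shape (batch, 2)
    greedy_action = tf.argmax(q_values, axis=1)                     # acting greedily w.r.t. the Q-values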
openai/baselines | baselines/her/replay_buffer.py | ReplayBuffer.sample | def sample(self, batch_size):
"""Returns a dict {key: array(batch_size x shapes[key])}
"""
buffers = {}
with self.lock:
assert self.current_size > 0
for key in self.buffers.keys():
buffers[key] = self.buffers[key][:self.current_size]
buffers['o_2'] = buffers['o'][:, 1:, :]
buffers['ag_2'] = buffers['ag'][:, 1:, :]
transitions = self.sample_transitions(buffers, batch_size)
for key in (['r', 'o_2', 'ag_2'] + list(self.buffers.keys())):
assert key in transitions, "key %s missing from transitions" % key
return transitions | python | def sample(self, batch_size):
"""Returns a dict {key: array(batch_size x shapes[key])}
"""
buffers = {}
with self.lock:
assert self.current_size > 0
for key in self.buffers.keys():
buffers[key] = self.buffers[key][:self.current_size]
buffers['o_2'] = buffers['o'][:, 1:, :]
buffers['ag_2'] = buffers['ag'][:, 1:, :]
transitions = self.sample_transitions(buffers, batch_size)
for key in (['r', 'o_2', 'ag_2'] + list(self.buffers.keys())):
assert key in transitions, "key %s missing from transitions" % key
return transitions | [
"def",
"sample",
"(",
"self",
",",
"batch_size",
")",
":",
"buffers",
"=",
"{",
"}",
"with",
"self",
".",
"lock",
":",
"assert",
"self",
".",
"current_size",
">",
"0",
"for",
"key",
"in",
"self",
".",
"buffers",
".",
"keys",
"(",
")",
":",
"buffers",
"[",
"key",
"]",
"=",
"self",
".",
"buffers",
"[",
"key",
"]",
"[",
":",
"self",
".",
"current_size",
"]",
"buffers",
"[",
"'o_2'",
"]",
"=",
"buffers",
"[",
"'o'",
"]",
"[",
":",
",",
"1",
":",
",",
":",
"]",
"buffers",
"[",
"'ag_2'",
"]",
"=",
"buffers",
"[",
"'ag'",
"]",
"[",
":",
",",
"1",
":",
",",
":",
"]",
"transitions",
"=",
"self",
".",
"sample_transitions",
"(",
"buffers",
",",
"batch_size",
")",
"for",
"key",
"in",
"(",
"[",
"'r'",
",",
"'o_2'",
",",
"'ag_2'",
"]",
"+",
"list",
"(",
"self",
".",
"buffers",
".",
"keys",
"(",
")",
")",
")",
":",
"assert",
"key",
"in",
"transitions",
",",
"\"key %s missing from transitions\"",
"%",
"key",
"return",
"transitions"
] | Returns a dict {key: array(batch_size x shapes[key])} | [
"Returns",
"a",
"dict",
"{",
"key",
":",
"array",
"(",
"batch_size",
"x",
"shapes",
"[",
"key",
"]",
")",
"}"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/replay_buffer.py#L37-L55 | valid |
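Note on the sample record above: the o_2/ag_2 keys are built by a pure time-shift — because 'o' and 'ag' store T+1 steps per episode, dropping step 0 yields the next-step arrays aligned with the T actions. A tiny illustration with made-up sizes:

    import numpy as np

    o = np.arange(2 * 4 * 1).reshape(2, 4, 1)   # 2 episodes, T=3 transitions, so T+1=4 observations
    o_2 = o[:, 1:, :]                           # shape (2, 3, 1); o_2[ep, t] is the observation after step t
    assert np.array_equal(o_2[0, 0], o[0, 1])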
openai/baselines | baselines/her/replay_buffer.py | ReplayBuffer.store_episode | def store_episode(self, episode_batch):
"""episode_batch: array(batch_size x (T or T+1) x dim_key)
"""
batch_sizes = [len(episode_batch[key]) for key in episode_batch.keys()]
assert np.all(np.array(batch_sizes) == batch_sizes[0])
batch_size = batch_sizes[0]
with self.lock:
idxs = self._get_storage_idx(batch_size)
# load inputs into buffers
for key in self.buffers.keys():
self.buffers[key][idxs] = episode_batch[key]
self.n_transitions_stored += batch_size * self.T | python | def store_episode(self, episode_batch):
"""episode_batch: array(batch_size x (T or T+1) x dim_key)
"""
batch_sizes = [len(episode_batch[key]) for key in episode_batch.keys()]
assert np.all(np.array(batch_sizes) == batch_sizes[0])
batch_size = batch_sizes[0]
with self.lock:
idxs = self._get_storage_idx(batch_size)
# load inputs into buffers
for key in self.buffers.keys():
self.buffers[key][idxs] = episode_batch[key]
self.n_transitions_stored += batch_size * self.T | [
"def",
"store_episode",
"(",
"self",
",",
"episode_batch",
")",
":",
"batch_sizes",
"=",
"[",
"len",
"(",
"episode_batch",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"episode_batch",
".",
"keys",
"(",
")",
"]",
"assert",
"np",
".",
"all",
"(",
"np",
".",
"array",
"(",
"batch_sizes",
")",
"==",
"batch_sizes",
"[",
"0",
"]",
")",
"batch_size",
"=",
"batch_sizes",
"[",
"0",
"]",
"with",
"self",
".",
"lock",
":",
"idxs",
"=",
"self",
".",
"_get_storage_idx",
"(",
"batch_size",
")",
"# load inputs into buffers",
"for",
"key",
"in",
"self",
".",
"buffers",
".",
"keys",
"(",
")",
":",
"self",
".",
"buffers",
"[",
"key",
"]",
"[",
"idxs",
"]",
"=",
"episode_batch",
"[",
"key",
"]",
"self",
".",
"n_transitions_stored",
"+=",
"batch_size",
"*",
"self",
".",
"T"
] | episode_batch: array(batch_size x (T or T+1) x dim_key) | [
"episode_batch",
":",
"array",
"(",
"batch_size",
"x",
"(",
"T",
"or",
"T",
"+",
"1",
")",
"x",
"dim_key",
")"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/replay_buffer.py#L57-L71 | valid |
openai/baselines | baselines/her/ddpg.py | DDPG.store_episode | def store_episode(self, episode_batch, update_stats=True):
"""
episode_batch: array of batch_size x (T or T+1) x dim_key
'o' is of size T+1, others are of size T
"""
self.buffer.store_episode(episode_batch)
if update_stats:
# add transitions to normalizer
episode_batch['o_2'] = episode_batch['o'][:, 1:, :]
episode_batch['ag_2'] = episode_batch['ag'][:, 1:, :]
num_normalizing_transitions = transitions_in_episode_batch(episode_batch)
transitions = self.sample_transitions(episode_batch, num_normalizing_transitions)
o, g, ag = transitions['o'], transitions['g'], transitions['ag']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
# No need to preprocess the o_2 and g_2 since this is only used for stats
self.o_stats.update(transitions['o'])
self.g_stats.update(transitions['g'])
self.o_stats.recompute_stats()
self.g_stats.recompute_stats() | python | def store_episode(self, episode_batch, update_stats=True):
"""
episode_batch: array of batch_size x (T or T+1) x dim_key
'o' is of size T+1, others are of size T
"""
self.buffer.store_episode(episode_batch)
if update_stats:
# add transitions to normalizer
episode_batch['o_2'] = episode_batch['o'][:, 1:, :]
episode_batch['ag_2'] = episode_batch['ag'][:, 1:, :]
num_normalizing_transitions = transitions_in_episode_batch(episode_batch)
transitions = self.sample_transitions(episode_batch, num_normalizing_transitions)
o, g, ag = transitions['o'], transitions['g'], transitions['ag']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
# No need to preprocess the o_2 and g_2 since this is only used for stats
self.o_stats.update(transitions['o'])
self.g_stats.update(transitions['g'])
self.o_stats.recompute_stats()
self.g_stats.recompute_stats() | [
"def",
"store_episode",
"(",
"self",
",",
"episode_batch",
",",
"update_stats",
"=",
"True",
")",
":",
"self",
".",
"buffer",
".",
"store_episode",
"(",
"episode_batch",
")",
"if",
"update_stats",
":",
"# add transitions to normalizer",
"episode_batch",
"[",
"'o_2'",
"]",
"=",
"episode_batch",
"[",
"'o'",
"]",
"[",
":",
",",
"1",
":",
",",
":",
"]",
"episode_batch",
"[",
"'ag_2'",
"]",
"=",
"episode_batch",
"[",
"'ag'",
"]",
"[",
":",
",",
"1",
":",
",",
":",
"]",
"num_normalizing_transitions",
"=",
"transitions_in_episode_batch",
"(",
"episode_batch",
")",
"transitions",
"=",
"self",
".",
"sample_transitions",
"(",
"episode_batch",
",",
"num_normalizing_transitions",
")",
"o",
",",
"g",
",",
"ag",
"=",
"transitions",
"[",
"'o'",
"]",
",",
"transitions",
"[",
"'g'",
"]",
",",
"transitions",
"[",
"'ag'",
"]",
"transitions",
"[",
"'o'",
"]",
",",
"transitions",
"[",
"'g'",
"]",
"=",
"self",
".",
"_preprocess_og",
"(",
"o",
",",
"ag",
",",
"g",
")",
"# No need to preprocess the o_2 and g_2 since this is only used for stats",
"self",
".",
"o_stats",
".",
"update",
"(",
"transitions",
"[",
"'o'",
"]",
")",
"self",
".",
"g_stats",
".",
"update",
"(",
"transitions",
"[",
"'g'",
"]",
")",
"self",
".",
"o_stats",
".",
"recompute_stats",
"(",
")",
"self",
".",
"g_stats",
".",
"recompute_stats",
"(",
")"
] | episode_batch: array of batch_size x (T or T+1) x dim_key
'o' is of size T+1, others are of size T | [
"episode_batch",
":",
"array",
"of",
"batch_size",
"x",
"(",
"T",
"or",
"T",
"+",
"1",
")",
"x",
"dim_key",
"o",
"is",
"of",
"size",
"T",
"+",
"1",
"others",
"are",
"of",
"size",
"T"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/ddpg.py#L217-L240 | valid |
openai/baselines | baselines/run.py | parse_cmdline_kwargs | def parse_cmdline_kwargs(args):
'''
convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible
'''
def parse(v):
assert isinstance(v, str)
try:
return eval(v)
except (NameError, SyntaxError):
return v
return {k: parse(v) for k,v in parse_unknown_args(args).items()} | python | def parse_cmdline_kwargs(args):
'''
convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible
'''
def parse(v):
assert isinstance(v, str)
try:
return eval(v)
except (NameError, SyntaxError):
return v
return {k: parse(v) for k,v in parse_unknown_args(args).items()} | [
"def",
"parse_cmdline_kwargs",
"(",
"args",
")",
":",
"def",
"parse",
"(",
"v",
")",
":",
"assert",
"isinstance",
"(",
"v",
",",
"str",
")",
"try",
":",
"return",
"eval",
"(",
"v",
")",
"except",
"(",
"NameError",
",",
"SyntaxError",
")",
":",
"return",
"v",
"return",
"{",
"k",
":",
"parse",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"parse_unknown_args",
"(",
"args",
")",
".",
"items",
"(",
")",
"}"
] | convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible | [
"convert",
"a",
"list",
"of",
"=",
"-",
"spaced",
"command",
"-",
"line",
"arguments",
"to",
"a",
"dictionary",
"evaluating",
"python",
"objects",
"when",
"possible"
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/run.py#L180-L192 | valid |
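Note on the parse_cmdline_kwargs record above: values that eval() to Python literals come back as objects, while anything that raises NameError or SyntaxError stays a string. A hedged worked example, assuming parse_unknown_args maps '--key=value' pairs to a string-valued dict as the code relies on:

    from baselines.run import parse_cmdline_kwargs

    extra_args = parse_cmdline_kwargs(['--lr=3e-4', '--network=mlp', '--num_layers=2'])
    # expected: {'lr': 0.0003, 'network': 'mlp', 'num_layers': 2}
    # 'mlp' is not a defined name, so eval() raises NameError and the raw string is kept.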
openai/baselines | baselines/her/experiment/config.py | cached_make_env | def cached_make_env(make_env):
"""
Only creates a new environment from the provided function if one has not yet already been
created. This is useful here because we need to infer certain properties of the env, e.g.
its observation and action spaces, without any intent of actually using it.
"""
if make_env not in CACHED_ENVS:
env = make_env()
CACHED_ENVS[make_env] = env
return CACHED_ENVS[make_env] | python | def cached_make_env(make_env):
"""
Only creates a new environment from the provided function if one has not yet already been
created. This is useful here because we need to infer certain properties of the env, e.g.
its observation and action spaces, without any intent of actually using it.
"""
if make_env not in CACHED_ENVS:
env = make_env()
CACHED_ENVS[make_env] = env
return CACHED_ENVS[make_env] | [
"def",
"cached_make_env",
"(",
"make_env",
")",
":",
"if",
"make_env",
"not",
"in",
"CACHED_ENVS",
":",
"env",
"=",
"make_env",
"(",
")",
"CACHED_ENVS",
"[",
"make_env",
"]",
"=",
"env",
"return",
"CACHED_ENVS",
"[",
"make_env",
"]"
] | Only creates a new environment from the provided function if one has not yet already been
created. This is useful here because we need to infer certain properties of the env, e.g.
its observation and action spaces, without any intent of actually using it. | [
"Only",
"creates",
"a",
"new",
"environment",
"from",
"the",
"provided",
"function",
"if",
"one",
"has",
"not",
"yet",
"already",
"been",
"created",
".",
"This",
"is",
"useful",
"here",
"because",
"we",
"need",
"to",
"infer",
"certain",
"properties",
"of",
"the",
"env",
"e",
".",
"g",
".",
"its",
"observation",
"and",
"action",
"spaces",
"without",
"any",
"intend",
"of",
"actually",
"using",
"it",
"."
] | 3301089b48c42b87b396e246ea3f56fa4bfc9678 | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/her/experiment/config.py#L61-L70 | valid |
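Note on the cached_make_env record above: it memoizes on the constructor callable itself, so passing the same function object twice yields the same environment instance. Sketch (gym and the Fetch env id are illustrative):

    import gym

    make_env = lambda: gym.make('FetchReach-v1')   # any zero-argument env constructor
    env_a = cached_make_env(make_env)              # builds the env and stores it in CACHED_ENVS
    env_b = cached_make_env(make_env)              # cache hit: no second env is created
    assert env_a is env_b
    # Note: the cache key is the callable object, so a *different* lambda with the same body
    # would build a fresh environment.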
aleju/imgaug | imgaug/augmentables/kps.py | compute_geometric_median | def compute_geometric_median(X, eps=1e-5):
"""
Estimate the geometric median of points in 2D.
Code from https://stackoverflow.com/a/30305181
Parameters
----------
X : (N,2) ndarray
Points in 2D. Second axis must be given in xy-form.
eps : float, optional
Distance threshold at which to stop iterating and return the median.
Returns
-------
(2,) ndarray
Geometric median as xy-coordinate.
"""
y = np.mean(X, 0)
while True:
D = scipy.spatial.distance.cdist(X, [y])
nonzeros = (D != 0)[:, 0]
Dinv = 1 / D[nonzeros]
Dinvs = np.sum(Dinv)
W = Dinv / Dinvs
T = np.sum(W * X[nonzeros], 0)
num_zeros = len(X) - np.sum(nonzeros)
if num_zeros == 0:
y1 = T
elif num_zeros == len(X):
return y
else:
R = (T - y) * Dinvs
r = np.linalg.norm(R)
rinv = 0 if r == 0 else num_zeros/r
y1 = max(0, 1-rinv)*T + min(1, rinv)*y
if scipy.spatial.distance.euclidean(y, y1) < eps:
return y1
y = y1 | python | def compute_geometric_median(X, eps=1e-5):
"""
Estimate the geometric median of points in 2D.
Code from https://stackoverflow.com/a/30305181
Parameters
----------
X : (N,2) ndarray
Points in 2D. Second axis must be given in xy-form.
eps : float, optional
Distance threshold at which to stop iterating and return the median.
Returns
-------
(2,) ndarray
Geometric median as xy-coordinate.
"""
y = np.mean(X, 0)
while True:
D = scipy.spatial.distance.cdist(X, [y])
nonzeros = (D != 0)[:, 0]
Dinv = 1 / D[nonzeros]
Dinvs = np.sum(Dinv)
W = Dinv / Dinvs
T = np.sum(W * X[nonzeros], 0)
num_zeros = len(X) - np.sum(nonzeros)
if num_zeros == 0:
y1 = T
elif num_zeros == len(X):
return y
else:
R = (T - y) * Dinvs
r = np.linalg.norm(R)
rinv = 0 if r == 0 else num_zeros/r
y1 = max(0, 1-rinv)*T + min(1, rinv)*y
if scipy.spatial.distance.euclidean(y, y1) < eps:
return y1
y = y1 | [
"def",
"compute_geometric_median",
"(",
"X",
",",
"eps",
"=",
"1e-5",
")",
":",
"y",
"=",
"np",
".",
"mean",
"(",
"X",
",",
"0",
")",
"while",
"True",
":",
"D",
"=",
"scipy",
".",
"spatial",
".",
"distance",
".",
"cdist",
"(",
"X",
",",
"[",
"y",
"]",
")",
"nonzeros",
"=",
"(",
"D",
"!=",
"0",
")",
"[",
":",
",",
"0",
"]",
"Dinv",
"=",
"1",
"/",
"D",
"[",
"nonzeros",
"]",
"Dinvs",
"=",
"np",
".",
"sum",
"(",
"Dinv",
")",
"W",
"=",
"Dinv",
"/",
"Dinvs",
"T",
"=",
"np",
".",
"sum",
"(",
"W",
"*",
"X",
"[",
"nonzeros",
"]",
",",
"0",
")",
"num_zeros",
"=",
"len",
"(",
"X",
")",
"-",
"np",
".",
"sum",
"(",
"nonzeros",
")",
"if",
"num_zeros",
"==",
"0",
":",
"y1",
"=",
"T",
"elif",
"num_zeros",
"==",
"len",
"(",
"X",
")",
":",
"return",
"y",
"else",
":",
"R",
"=",
"(",
"T",
"-",
"y",
")",
"*",
"Dinvs",
"r",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"R",
")",
"rinv",
"=",
"0",
"if",
"r",
"==",
"0",
"else",
"num_zeros",
"/",
"r",
"y1",
"=",
"max",
"(",
"0",
",",
"1",
"-",
"rinv",
")",
"*",
"T",
"+",
"min",
"(",
"1",
",",
"rinv",
")",
"*",
"y",
"if",
"scipy",
".",
"spatial",
".",
"distance",
".",
"euclidean",
"(",
"y",
",",
"y1",
")",
"<",
"eps",
":",
"return",
"y1",
"y",
"=",
"y1"
] | Estimate the geometric median of points in 2D.
Code from https://stackoverflow.com/a/30305181
Parameters
----------
X : (N,2) ndarray
Points in 2D. Second axis must be given in xy-form.
eps : float, optional
Distance threshold at which to stop iterating and return the median.
Returns
-------
(2,) ndarray
Geometric median as xy-coordinate. | [
"Estimate",
"the",
"geometric",
"median",
"of",
"points",
"in",
"2D",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L13-L58 | valid |
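Note on the compute_geometric_median record above: the loop is Weiszfeld's iteration, with the usual fix-up when the estimate lands exactly on a data point. A small hedged check — with three coincident points and one outlier, the minimizer of summed distances stays at the cluster, unlike the mean:

    import numpy as np
    from imgaug.augmentables.kps import compute_geometric_median

    pts = np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [10.0, 10.0]])
    print(pts.mean(axis=0))                  # [2.5 2.5] -- the mean is pulled toward the outlier
    print(compute_geometric_median(pts))     # expected to be approximately [0. 0.]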
aleju/imgaug | imgaug/augmentables/kps.py | Keypoint.project | def project(self, from_shape, to_shape):
"""
Project the keypoint onto a new position on a new image.
E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
and y=(20 of 100 pixels) and is projected onto a new image with
size (width=200, height=200), its new position will be (20, 40).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates.
"""
xy_proj = project_coords([(self.x, self.y)], from_shape, to_shape)
return self.deepcopy(x=xy_proj[0][0], y=xy_proj[0][1]) | python | def project(self, from_shape, to_shape):
"""
Project the keypoint onto a new position on a new image.
E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
and y=(20 of 100 pixels) and is projected onto a new image with
size (width=200, height=200), its new position will be (20, 40).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates.
"""
xy_proj = project_coords([(self.x, self.y)], from_shape, to_shape)
return self.deepcopy(x=xy_proj[0][0], y=xy_proj[0][1]) | [
"def",
"project",
"(",
"self",
",",
"from_shape",
",",
"to_shape",
")",
":",
"xy_proj",
"=",
"project_coords",
"(",
"[",
"(",
"self",
".",
"x",
",",
"self",
".",
"y",
")",
"]",
",",
"from_shape",
",",
"to_shape",
")",
"return",
"self",
".",
"deepcopy",
"(",
"x",
"=",
"xy_proj",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"y",
"=",
"xy_proj",
"[",
"0",
"]",
"[",
"1",
"]",
")"
] | Project the keypoint onto a new position on a new image.
E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
and y=(20 of 100 pixels) and is projected onto a new image with
size (width=200, height=200), its new position will be (20, 40).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates. | [
"Project",
"the",
"keypoint",
"onto",
"a",
"new",
"position",
"on",
"a",
"new",
"image",
"."
] | 786be74aa855513840113ea523c5df495dc6a8af | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/kps.py#L105-L131 | valid |
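Note on the project record above: the docstring's own example, as code (shapes are (height, width) tuples; the import path follows the record's file location):

    from imgaug.augmentables.kps import Keypoint

    kp = Keypoint(x=10, y=20)
    kp_proj = kp.project(from_shape=(100, 100), to_shape=(200, 200))
    print(kp_proj.x, kp_proj.y)   # expected: 20.0 40.0 -- both axes scaled by 2, as in the docstring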