Upload folder using huggingface_hub
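For context, a commit with this title is typically produced by `huggingface_hub`'s `upload_folder`. A minimal sketch of reproducing it (the `repo_id` below is a placeholder, not taken from this commit):

```python
# Sketch: how a commit like this one is usually created with huggingface_hub.
# Assumes huggingface_hub is installed and you are authenticated
# (e.g. via `huggingface-cli login`). repo_id is a placeholder.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="pointmaze/open-v2",       # local folder containing data/main_data.hdf5 etc.
    path_in_repo="pointmaze/open-v2",      # destination path inside the repo
    repo_id="your-org/your-dataset-repo",  # placeholder; substitute the real dataset repo
    repo_type="dataset",
    commit_message="Upload folder using huggingface_hub",
)
```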
.gitattributes
CHANGED
@@ -84,3 +84,4 @@ pointmaze/large-v2/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
 pointmaze/medium-dense-v2/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
 pointmaze/medium-v2/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
 pointmaze/open-dense-v2/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
+pointmaze/open-v2/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
pointmaze/open-v2/data/main_data.hdf5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45f45bf024a5e5f1c66c06f6203921cc36123d0605761fc12f8bbc2d00d27612
+size 437581416
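The three lines above are a Git LFS pointer: the repository itself stores only the spec version, the SHA-256 oid, and the byte size, while the ~437 MB HDF5 payload lives in LFS storage. A hedged sketch of fetching the actual file (again, `repo_id` is a placeholder):

```python
# Sketch: resolving the LFS pointer to the real HDF5 file.
# hf_hub_download follows LFS pointers transparently; repo_id is a placeholder.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="your-org/your-dataset-repo",  # placeholder; substitute the real dataset repo
    filename="pointmaze/open-v2/data/main_data.hdf5",
    repo_type="dataset",
)
print(local_path)  # cached local copy, 437581416 bytes per the pointer above
```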
pointmaze/open-v2/data/metadata.json
ADDED
@@ -0,0 +1 @@
+{"total_episodes": 9525, "total_steps": 1000000, "data_format": "hdf5", "observation_space": "{\"type\": \"Dict\", \"subspaces\": {\"achieved_goal\": {\"type\": \"Box\", \"dtype\": \"float64\", \"shape\": [2], \"low\": [-Infinity, -Infinity], \"high\": [Infinity, Infinity]}, \"desired_goal\": {\"type\": \"Box\", \"dtype\": \"float64\", \"shape\": [2], \"low\": [-Infinity, -Infinity], \"high\": [Infinity, Infinity]}, \"observation\": {\"type\": \"Box\", \"dtype\": \"float64\", \"shape\": [4], \"low\": [-Infinity, -Infinity, -Infinity, -Infinity], \"high\": [Infinity, Infinity, Infinity, Infinity]}}}", "action_space": "{\"type\": \"Box\", \"dtype\": \"float32\", \"shape\": [2], \"low\": [-1.0, -1.0], \"high\": [1.0, 1.0]}", "env_spec": "{\"id\": \"PointMaze_Open-v3\", \"entry_point\": \"gymnasium_robotics.envs.maze.point_maze:PointMazeEnv\", \"reward_threshold\": null, \"nondeterministic\": false, \"max_episode_steps\": 1000000, \"order_enforce\": true, \"disable_env_checker\": false, \"kwargs\": {\"maze_map\": [[1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 1, 1, 1]], \"reward_type\": \"sparse\", \"continuing_task\": true, \"reset_target\": true}, \"additional_wrappers\": [], \"vector_entry_point\": null}", "dataset_id": "D4RL/pointmaze/open-v2", "algorithm_name": "QIteration", "author": ["Rodrigo Perez-Vicente"], "author_email": ["rperezvicente@farama.org"], "code_permalink": "https://github.com/rodrigodelazcano/d4rl-minari-dataset-generation", "minari_version": "0.4.3", "eval_env_spec": "{\"id\": \"PointMaze_Open-v3\", \"entry_point\": \"gymnasium_robotics.envs.maze.point_maze:PointMazeEnv\", \"reward_threshold\": null, \"nondeterministic\": false, \"max_episode_steps\": 300, \"order_enforce\": true, \"disable_env_checker\": false, \"kwargs\": {\"maze_map\": [[1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 1], [1, 0, 0, \"g\", 0, 0, 1], [1, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 1, 1, 1]], \"reward_type\": \"sparse\", \"continuing_task\": true, \"reset_target\": false}, \"additional_wrappers\": [], \"vector_entry_point\": null}", "ref_max_score": 229.86000061035156, "ref_min_score": 7.199999809265137, "num_episodes_average_score": 100, "dataset_size": 437.6, "description": "The data is collected from the [`PointMaze_Open-v3`](https://robotics.farama.org/envs/maze/point_maze/) environment, which contains an open arena with only perimeter walls. The agent uses a PD controller to follow a path of waypoints generated with QIteration until it reaches the goal. The task is continuing which means that when the agent reaches the goal the environment generates a new random goal without resetting the location of the agent. The reward function is sparse, only returning a value of 1 if the goal is reached, otherwise 0. To add variance to the collected paths random noise is added to the actions taken by the agent.", "requirements": ["gymnasium-robotics>=1.2.4"]}
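The metadata identifies this as a Minari dataset (`dataset_id` `D4RL/pointmaze/open-v2`, collected with QIteration on `PointMaze_Open-v3`). A hedged sketch of consuming it through the Minari API, assuming a Minari version that resolves this namespaced dataset id:

```python
# Sketch: loading the dataset via Minari (assumes `pip install minari` and a
# Minari release that supports the namespaced id "D4RL/pointmaze/open-v2").
import minari

dataset = minari.load_dataset("D4RL/pointmaze/open-v2", download=True)
print(dataset.total_episodes, dataset.total_steps)  # 9525 episodes, 1000000 steps per metadata

# Observations are dicts with "observation", "achieved_goal", and
# "desired_goal" keys, matching the observation_space in the metadata above.
for episode in dataset.iterate_episodes():
    print(episode.observations["observation"].shape, episode.actions.shape)
    break
```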