younik committed
Commit 83c16d0 · verified · 1 Parent(s): 2fd1621

Upload folder using huggingface_hub

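The commit message says the folder was pushed with huggingface_hub. Below is a minimal sketch of the kind of call that produces such a commit, assuming a local folder and a hypothetical repo id (neither is recorded in the diff itself):

# Sketch: push a local dataset folder to the Hub with huggingface_hub.
# folder_path, path_in_repo, and repo_id are placeholders, not values from this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./pointmaze/large-dense-v2",  # local copy of the dataset folder
    path_in_repo="pointmaze/large-dense-v2",   # destination inside the repo
    repo_id="farama-minari/D4RL",              # hypothetical dataset repo
    repo_type="dataset",
    commit_message="Upload folder using huggingface_hub",
)

upload_folder registers large binary files with Git LFS on the Hub side, which matches the .gitattributes change in this same commit.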
.gitattributes CHANGED
@@ -79,3 +79,4 @@ antmaze/medium-diverse-v1/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
  antmaze/medium-play-v1/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
  antmaze/umaze-diverse-v1/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
  antmaze/umaze-v1/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
+ pointmaze/large-dense-v2/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
pointmaze/large-dense-v2/data/main_data.hdf5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51feaa439ecc50f1d8b873c6493df3d284e252570a072cfbd1d234fb177245ff
+ size 239183968
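The added main_data.hdf5 is a Git LFS pointer: the repository stores only the spec URL, the sha256 oid, and the byte size (about 239 MB), while the blob itself lives in LFS storage. A hedged sketch of fetching the real file and checking it against the pointer fields, assuming a hypothetical repo id:

# Sketch: download the LFS-backed file and verify it against the pointer above.
# repo_id is a placeholder; the oid and size are taken from the pointer.
import hashlib
import os
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="farama-minari/D4RL",  # hypothetical dataset repo
    repo_type="dataset",
    filename="pointmaze/large-dense-v2/data/main_data.hdf5",
)

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert sha.hexdigest() == "51feaa439ecc50f1d8b873c6493df3d284e252570a072cfbd1d234fb177245ff"
assert os.path.getsize(path) == 239183968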
pointmaze/large-dense-v2/data/metadata.json ADDED
@@ -0,0 +1 @@
+ {"total_episodes": 3360, "total_steps": 1000000, "data_format": "hdf5", "observation_space": "{\"type\": \"Dict\", \"subspaces\": {\"achieved_goal\": {\"type\": \"Box\", \"dtype\": \"float64\", \"shape\": [2], \"low\": [-Infinity, -Infinity], \"high\": [Infinity, Infinity]}, \"desired_goal\": {\"type\": \"Box\", \"dtype\": \"float64\", \"shape\": [2], \"low\": [-Infinity, -Infinity], \"high\": [Infinity, Infinity]}, \"observation\": {\"type\": \"Box\", \"dtype\": \"float64\", \"shape\": [4], \"low\": [-Infinity, -Infinity, -Infinity, -Infinity], \"high\": [Infinity, Infinity, Infinity, Infinity]}}}", "action_space": "{\"type\": \"Box\", \"dtype\": \"float32\", \"shape\": [2], \"low\": [-1.0, -1.0], \"high\": [1.0, 1.0]}", "env_spec": "{\"id\": \"PointMaze_LargeDense-v3\", \"entry_point\": \"gymnasium_robotics.envs.maze.point_maze:PointMazeEnv\", \"reward_threshold\": null, \"nondeterministic\": false, \"max_episode_steps\": 1000000, \"order_enforce\": true, \"disable_env_checker\": false, \"kwargs\": {\"maze_map\": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1], [1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1], [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1], [1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1], [1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1], [1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], \"reward_type\": \"dense\", \"continuing_task\": true, \"reset_target\": true}, \"additional_wrappers\": [], \"vector_entry_point\": null}", "dataset_id": "D4RL/pointmaze/large-dense-v2", "algorithm_name": "QIteration", "author": ["Rodrigo Perez-Vicente"], "author_email": ["rperezvicente@farama.org"], "code_permalink": "https://github.com/rodrigodelazcano/d4rl-minari-dataset-generation", "minari_version": "0.4.3", "eval_env_spec": "{\"id\": \"PointMaze_LargeDense-v3\", \"entry_point\": \"gymnasium_robotics.envs.maze.point_maze:PointMazeEnv\", \"reward_threshold\": null, \"nondeterministic\": false, \"max_episode_steps\": 800, \"order_enforce\": true, \"disable_env_checker\": false, \"kwargs\": {\"maze_map\": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1], [1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1], [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1], [1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1], [1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1], [1, 0, 0, 1, 0, 0, 0, 1, 0, \"g\", 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], \"reward_type\": \"dense\", \"continuing_task\": true, \"reset_target\": false}, \"additional_wrappers\": [], \"vector_entry_point\": null}", "ref_max_score": 481.5344543457031, "ref_min_score": 27.165931701660156, "num_episodes_average_score": 100, "dataset_size": 239.2, "description": "The data is collected from the [`PointMaze_LargeDense-v3`](https://robotics.farama.org/envs/maze/point_maze/) environment. The agent uses a PD controller to follow a path of waypoints generated with QIteration until it reaches the goal. The task is continuing which means that when the agent reaches the goal the environment generates a new random goal without resetting the location of the agent. The reward function is dense, being the exponential negative Euclidean distance between the goal and the agent. To add variance to the collected paths random noise is added to the actions taken by the agent.", "requirements": ["gymnasium-robotics>=1.2.4"]}