Upload 13 files

- Agent.onnx +3 -0
- Agent/Agent-1200030.onnx +3 -0
- Agent/Agent-1200030.pt +3 -0
- Agent/Agent-499980.onnx +3 -0
- Agent/Agent-499980.pt +3 -0
- Agent/Agent-999989.onnx +3 -0
- Agent/Agent-999989.pt +3 -0
- Agent/checkpoint.pt +3 -0
- Agent/events.out.tfevents.1717243112.RICHARD.23980.0 +3 -0
- README.md +31 -0
- configuration.yaml +107 -0
- run_logs/timers.json +575 -0
- run_logs/training_status.json +53 -0
Agent.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66faf0e0bb397e6334b890a17a435874e49c872ea345bea2506c1e0f76a2ef08
+size 562656
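The three lines above (and the pointer files that follow) are Git LFS pointers rather than the binaries themselves: `oid` is the SHA-256 of the stored blob and `size` is its byte count. Below is a minimal sketch of fetching the resolved `Agent.onnx` with `huggingface_hub`; the `repo_id` is an assumption to replace with this repository's actual id.

```python
# Hedged sketch: download the real file behind the LFS pointer.
# The repo_id is an assumption; `git lfs pull` after cloning works as well.
from huggingface_hub import hf_hub_download

onnx_path = hf_hub_download(
    repo_id="hivex-research/hivex-DBR-PPO-baseline-task-1-difficulty-4",  # assumed repo id
    filename="Agent.onnx",
)
print(onnx_path)  # local cache path of the 562,656-byte ONNX policy
```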
Agent/Agent-1200030.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66faf0e0bb397e6334b890a17a435874e49c872ea345bea2506c1e0f76a2ef08
+size 562656
Agent/Agent-1200030.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d94acc04a217263fc19173924c2d08d8d9d9ab61c86ebc3f8338c10873fafab
+size 4530031
Agent/Agent-499980.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f4d8d4d6a88aa6c0923547f5f21c57a6f337c8e8729ab3301d22cd979c43b3f
+size 562656
Agent/Agent-499980.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c3ecfa68480b107d3e62a249ec9876fdd1d60b13cd1a1b6eba54bbcd57d8203
+size 4530031
Agent/Agent-999989.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c908493b1d0fbb45907552ee2a48dfda88e6a1dcde157cbef856b6209ce1b5b4
+size 562656
Agent/Agent-999989.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cdb8c9733fa1b83f941e9670e63ed778018b4a0ed520ef8693f4ac4ec0e9b165
+size 4530031
Agent/checkpoint.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d94acc04a217263fc19173924c2d08d8d9d9ab61c86ebc3f8338c10873fafab
+size 4530031
Agent/events.out.tfevents.1717243112.RICHARD.23980.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66e8c4bcedce9865a81ef344108e0dba672ff2f0be5c4f2534a25df510a71983
+size 2406103
README.md
ADDED
@@ -0,0 +1,31 @@
+---
+library_name: hivex
+original_train_name: DroneBasedReforestation_difficulty_4_task_1_run_id_1_train
+tags:
+- hivex
+- hivex-drone-based-reforestation
+- reinforcement-learning
+- multi-agent-reinforcement-learning
+model-index:
+- name: hivex-DBR-PPO-baseline-task-1-difficulty-4
+  results:
+  - task:
+      type: sub-task
+      name: find_closest_forest_perimeter
+      task-id: 1
+      difficulty-id: 4
+    dataset:
+      name: hivex-drone-based-reforestation
+      type: hivex-drone-based-reforestation
+    metrics:
+    - type: out_of_energy_count
+      value: 0.009307907656766473 +/- 0.011444044459469479
+      name: Out of Energy Count
+      verified: true
+    - type: cumulative_reward
+      value: 98.75451385498047 +/- 1.5064383079350547
+      name: Cumulative Reward
+      verified: true
+---
+
+This model serves as the baseline for the **Drone-Based Reforestation** environment, trained and tested on task <code>1</code> with difficulty <code>4</code> using the Proximal Policy Optimization (PPO) algorithm.<br><br>Environment: **Drone-Based Reforestation**<br>Task: <code>1</code><br>Difficulty: <code>4</code><br>Algorithm: <code>PPO</code><br>Episode Length: <code>2000</code><br>Training <code>max_steps</code>: <code>1200000</code><br>Testing <code>max_steps</code>: <code>300000</code><br><br>Train & Test [Scripts](https://github.com/hivex-research/hivex)<br>Download the [Environment](https://github.com/hivex-research/hivex-environments)
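The exported `Agent.onnx` policy can be inspected and smoke-tested with `onnxruntime`; this is a rough sketch rather than the hivex project's own evaluation API, and the tensor names and shapes are discovered from the graph instead of assumed.

```python
# Hedged sketch: load the exported ONNX policy and run it on zero observations.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("Agent.onnx")
for inp in session.get_inputs():
    print("input:", inp.name, inp.shape)    # visual/vector observations, action masks, ...
for out in session.get_outputs():
    print("output:", out.name, out.shape)   # action tensors

# Replace dynamic (non-int) dimensions with 1 and feed zeros as a smoke test.
feeds = {
    inp.name: np.zeros(
        [d if isinstance(d, int) else 1 for d in inp.shape], dtype=np.float32
    )
    for inp in session.get_inputs()
}
print([o.shape for o in session.run(None, feeds)])
```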
configuration.yaml
ADDED
@@ -0,0 +1,107 @@
+default_settings: null
+behaviors:
+  Agent:
+    trainer_type: ppo
+    hyperparameters:
+      batch_size: 1024
+      buffer_size: 8192
+      learning_rate: 0.0003
+      beta: 0.005
+      epsilon: 0.2
+      lambd: 0.95
+      num_epoch: 3
+      shared_critic: false
+      learning_rate_schedule: linear
+      beta_schedule: linear
+      epsilon_schedule: linear
+    network_settings:
+      normalize: false
+      hidden_units: 128
+      num_layers: 2
+      vis_encode_type: resnet
+      memory: null
+      goal_conditioning_type: hyper
+      deterministic: false
+    reward_signals:
+      curiosity:
+        gamma: 0.99
+        strength: 0.1
+        network_settings:
+          normalize: false
+          hidden_units: 128
+          num_layers: 2
+          vis_encode_type: resnet
+          memory: null
+          goal_conditioning_type: hyper
+          deterministic: false
+        learning_rate: 0.0003
+        encoding_size: 256
+      extrinsic:
+        gamma: 0.99
+        strength: 0.9
+        network_settings:
+          normalize: false
+          hidden_units: 128
+          num_layers: 2
+          vis_encode_type: resnet
+          memory: null
+          goal_conditioning_type: hyper
+          deterministic: false
+    init_path: null
+    keep_checkpoints: 5
+    checkpoint_interval: 500000
+    max_steps: 1200000
+    time_horizon: 8192
+    summary_freq: 6000
+    threaded: true
+    self_play: null
+    behavioral_cloning: null
+env_settings:
+  env_path: c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/dev_environments/Hivex_DroneBasedReforestation_win
+  env_args: null
+  base_port: 5007
+  num_envs: 1
+  num_areas: 1
+  seed: 5000
+  max_lifetime_restarts: 10
+  restarts_rate_limit_n: 1
+  restarts_rate_limit_period_s: 60
+engine_settings:
+  width: 84
+  height: 84
+  quality_level: 5
+  time_scale: 20
+  target_frame_rate: -1
+  capture_frame_rate: 60
+  no_graphics: true
+environment_parameters:
+  difficulty:
+    curriculum:
+    - value:
+        sampler_type: constant
+        sampler_parameters:
+          seed: 5000
+          value: 4
+      name: difficulty
+      completion_criteria: null
+  task:
+    curriculum:
+    - value:
+        sampler_type: constant
+        sampler_parameters:
+          seed: 5001
+          value: 1
+      name: task
+      completion_criteria: null
+checkpoint_settings:
+  run_id: DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train
+  initialize_from: null
+  load_model: false
+  resume: false
+  force: false
+  train_model: false
+  inference: false
+  results_dir: results
+torch_settings:
+  device: null
+debug: false
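The configuration above is a standard ML-Agents 0.30.0 trainer file: PPO with an extrinsic reward (strength 0.9) plus a curiosity signal (strength 0.1), a two-layer 128-unit network with a ResNet visual encoder, and constant-value curricula pinning difficulty to 4 and task to 1. A minimal sketch of launching such a run follows; the local YAML filename is an assumption (the original run used a generated path under `configs/mlagents/tmp/train/`, see `run_logs/timers.json`).

```python
# Hedged sketch: launch training with this configuration via the ML-Agents CLI.
import subprocess

subprocess.run(
    [
        "mlagents-learn",
        "configuration.yaml",  # assumed local filename for the config shown above
        "--run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train",
        "--base-port", "5007",
    ],
    check=True,
)
```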
run_logs/timers.json
ADDED
@@ -0,0 +1,575 @@
+{
+  "name": "root",
+  "gauges": {
+    "Agent.Policy.Entropy.mean": {
+      "value": 1.3061087131500244,
+      "min": 1.3061087131500244,
+      "max": 1.4217795133590698,
+      "count": 200
+    },
+    "Agent.Policy.Entropy.sum": {
+      "value": 7444.8193359375,
+      "min": 6594.3779296875,
+      "max": 9651.1162109375,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.TreeDropCount.mean": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.TreeDropCount.sum": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.SaveLocationCount.mean": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.SaveLocationCount.sum": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
+      "value": 0.015625,
+      "min": 0.0,
+      "max": 0.23076923076923078,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
+      "value": 3.0,
+      "min": 0.0,
+      "max": 9.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.Environment.LessonNumber.difficulty.mean": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.Environment.LessonNumber.difficulty.sum": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.Environment.LessonNumber.task.mean": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.Environment.LessonNumber.task.sum": {
+      "value": 0.0,
+      "min": 0.0,
+      "max": 0.0,
+      "count": 200
+    },
+    "Agent.Environment.EpisodeLength.mean": {
+      "value": 28.796875,
+      "min": 19.07070707070707,
+      "max": 244.22222222222223,
+      "count": 200
+    },
+    "Agent.Environment.EpisodeLength.sum": {
+      "value": 5529.0,
+      "min": 4677.0,
+      "max": 6963.0,
+      "count": 200
+    },
+    "Agent.Step.mean": {
+      "value": 1199988.0,
+      "min": 5913.0,
+      "max": 1199988.0,
+      "count": 200
+    },
+    "Agent.Step.sum": {
+      "value": 1199988.0,
+      "min": 5913.0,
+      "max": 1199988.0,
+      "count": 200
+    },
+    "Agent.Policy.CuriosityValueEstimate.mean": {
+      "value": 0.5123575925827026,
+      "min": -0.003072895808145404,
+      "max": 1.1557989120483398,
+      "count": 200
+    },
+    "Agent.Policy.CuriosityValueEstimate.sum": {
+      "value": 97.34794616699219,
+      "min": -0.09525977075099945,
+      "max": 296.294921875,
+      "count": 200
+    },
+    "Agent.Policy.ExtrinsicValueEstimate.mean": {
+      "value": 73.4503173828125,
+      "min": 0.07052389532327652,
+      "max": 81.7844467163086,
+      "count": 200
+    },
+    "Agent.Policy.ExtrinsicValueEstimate.sum": {
+      "value": 13955.560546875,
+      "min": 1.7630974054336548,
+      "max": 23572.26171875,
+      "count": 200
+    },
+    "Agent.Environment.CumulativeReward.mean": {
+      "value": 94.65837076325091,
+      "min": 71.42488220405693,
+      "max": 107.31066000690826,
+      "count": 200
+    },
+    "Agent.Environment.CumulativeReward.sum": {
+      "value": 17985.090445017675,
+      "min": 1884.4158946573734,
+      "max": 29685.348208050244,
+      "count": 200
+    },
+    "Agent.Policy.CuriosityReward.mean": {
+      "value": 0.08526155473603132,
+      "min": 0.0,
+      "max": 5.707144324596111,
+      "count": 200
+    },
+    "Agent.Policy.CuriosityReward.sum": {
+      "value": 16.19969539984595,
+      "min": 0.0,
+      "max": 222.57862865924835,
+      "count": 200
+    },
+    "Agent.Policy.ExtrinsicReward.mean": {
+      "value": 85.19253195627458,
+      "min": 64.28239143181305,
+      "max": 96.57959257448331,
+      "count": 200
+    },
+    "Agent.Policy.ExtrinsicReward.sum": {
+      "value": 16186.58107169217,
+      "min": 1695.9742895364761,
+      "max": 26716.812616853043,
+      "count": 200
+    },
+    "Agent.IsTraining.mean": {
+      "value": 1.0,
+      "min": 1.0,
+      "max": 1.0,
+      "count": 200
+    },
+    "Agent.IsTraining.sum": {
+      "value": 1.0,
+      "min": 1.0,
+      "max": 1.0,
+      "count": 200
+    },
+    "Agent.Losses.PolicyLoss.mean": {
+      "value": 0.02436118039380138,
+      "min": 0.014788141299504787,
+      "max": 0.03415193311714878,
+      "count": 144
+    },
+    "Agent.Losses.PolicyLoss.sum": {
+      "value": 0.02436118039380138,
+      "min": 0.014788141299504787,
+      "max": 0.03415193311714878,
+      "count": 144
+    },
+    "Agent.Losses.ValueLoss.mean": {
+      "value": 1433.8892110188801,
+      "min": 323.4534556070964,
+      "max": 2381.5792338053384,
+      "count": 144
+    },
+    "Agent.Losses.ValueLoss.sum": {
+      "value": 1433.8892110188801,
+      "min": 323.4534556070964,
+      "max": 2381.5792338053384,
+      "count": 144
+    },
+    "Agent.Policy.LearningRate.mean": {
+      "value": 1.199349600249996e-06,
+      "min": 1.199349600249996e-06,
+      "max": 0.00029789475070175,
+      "count": 144
+    },
+    "Agent.Policy.LearningRate.sum": {
+      "value": 1.199349600249996e-06,
+      "min": 1.199349600249996e-06,
+      "max": 0.00029789475070175,
+      "count": 144
+    },
+    "Agent.Policy.Epsilon.mean": {
+      "value": 0.10039975000000001,
+      "min": 0.10039975000000001,
+      "max": 0.19929825,
+      "count": 144
+    },
+    "Agent.Policy.Epsilon.sum": {
+      "value": 0.10039975000000001,
+      "min": 0.10039975000000001,
+      "max": 0.19929825,
+      "count": 144
+    },
+    "Agent.Policy.Beta.mean": {
+      "value": 2.9947524999999938e-05,
+      "min": 2.9947524999999938e-05,
+      "max": 0.004964982675,
+      "count": 144
+    },
+    "Agent.Policy.Beta.sum": {
+      "value": 2.9947524999999938e-05,
+      "min": 2.9947524999999938e-05,
+      "max": 0.004964982675,
+      "count": 144
+    },
+    "Agent.Losses.CuriosityForwardLoss.mean": {
+      "value": 0.027251241961494088,
+      "min": 0.019890636671334505,
+      "max": 0.5723420046269894,
+      "count": 144
+    },
+    "Agent.Losses.CuriosityForwardLoss.sum": {
+      "value": 0.027251241961494088,
+      "min": 0.019890636671334505,
+      "max": 0.5723420046269894,
+      "count": 144
+    },
+    "Agent.Losses.CuriosityInverseLoss.mean": {
+      "value": 1.4139881233374278,
+      "min": 1.2391985456148784,
+      "max": 3.305937925974528,
+      "count": 144
+    },
+    "Agent.Losses.CuriosityInverseLoss.sum": {
+      "value": 1.4139881233374278,
+      "min": 1.2391985456148784,
+      "max": 3.305937925974528,
+      "count": 144
+    }
+  },
+  "metadata": {
+    "timer_format_version": "0.1.0",
+    "start_time_seconds": "1717243111",
+    "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
+    "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train --base-port 5007",
+    "mlagents_version": "0.30.0",
+    "mlagents_envs_version": "0.30.0",
+    "communication_protocol_version": "1.5.0",
+    "pytorch_version": "1.7.1+cu110",
+    "numpy_version": "1.21.0",
+    "end_time_seconds": "1717248803"
+  },
+  "total": 5692.3659379,
+  "count": 1,
+  "self": 1.7120566000003237,
+  "children": {
+    "run_training.setup": {
+      "total": 0.051362200000000025,
+      "count": 1,
+      "self": 0.051362200000000025
+    },
+    "TrainerController.start_learning": {
+      "total": 5690.6025191,
+      "count": 1,
+      "self": 5.72295740008758,
+      "children": {
+        "TrainerController._reset_env": {
+          "total": 1.9943673,
+          "count": 1,
+          "self": 1.9943673
+        },
+        "TrainerController.advance": {
+          "total": 5682.726238999912,
+          "count": 410620,
+          "self": 5.277235299933636,
+          "children": {
+            "env_step": {
+              "total": 5677.449003699979,
+              "count": 410620,
+              "self": 3618.1533793998547,
+              "children": {
+                "SubprocessEnvManager._take_step": {
+                  "total": 2055.5891080000556,
+                  "count": 410620,
+                  "self": 10.855453400108672,
+                  "children": {
+                    "TorchPolicy.evaluate": {
+                      "total": 2044.733654599947,
+                      "count": 400011,
+                      "self": 2044.733654599947
+                    }
+                  }
+                },
+                "workers": {
+                  "total": 3.7065163000684045,
+                  "count": 410620,
+                  "self": 0.0,
+                  "children": {
+                    "worker_root": {
+                      "total": 5683.471061399958,
+                      "count": 410620,
+                      "is_parallel": true,
+                      "self": 2355.185482599993,
+                      "children": {
+                        "steps_from_proto": {
+                          "total": 0.006364299999999989,
+                          "count": 1,
+                          "is_parallel": true,
+                          "self": 0.00010039999999977844,
+                          "children": {
+                            "_process_maybe_compressed_observation": {
+                              "total": 0.00621930000000015,
+                              "count": 2,
+                              "is_parallel": true,
+                              "self": 2.8200000000033754e-05,
+                              "children": {
+                                "_observation_to_np_array": {
+                                  "total": 0.006191100000000116,
+                                  "count": 3,
+                                  "is_parallel": true,
+                                  "self": 2.9800000000301807e-05,
+                                  "children": {
+                                    "process_pixels": {
+                                      "total": 0.006161299999999814,
+                                      "count": 3,
+                                      "is_parallel": true,
+                                      "self": 0.00023290000000009137,
+                                      "children": {
+                                        "image_decompress": {
+                                          "total": 0.005928399999999723,
+                                          "count": 3,
+                                          "is_parallel": true,
+                                          "self": 0.005928399999999723
+                                        }
+                                      }
+                                    }
+                                  }
+                                }
+                              }
+                            },
+                            "_process_rank_one_or_two_observation": {
+                              "total": 4.460000000006126e-05,
+                              "count": 2,
+                              "is_parallel": true,
+                              "self": 4.460000000006126e-05
+                            }
+                          }
+                        },
+                        "UnityEnvironment.step": {
+                          "total": 3328.279214499965,
+                          "count": 410620,
+                          "is_parallel": true,
+                          "self": 19.825185299910117,
+                          "children": {
+                            "UnityEnvironment._generate_step_input": {
+                              "total": 19.60430590010752,
+                              "count": 410620,
+                              "is_parallel": true,
+                              "self": 19.60430590010752
+                            },
+                            "communicator.exchange": {
+                              "total": 3146.9941028000803,
+                              "count": 410620,
+                              "is_parallel": true,
+                              "self": 3146.9941028000803
+                            },
+                            "steps_from_proto": {
+                              "total": 141.85562049986675,
+                              "count": 410620,
+                              "is_parallel": true,
+                              "self": 28.017311199988725,
+                              "children": {
+                                "_process_maybe_compressed_observation": {
+                                  "total": 101.8772321999369,
+                                  "count": 821240,
+                                  "is_parallel": true,
+                                  "self": 8.001321499805357,
+                                  "children": {
+                                    "_observation_to_np_array": {
+                                      "total": 93.87591070013154,
+                                      "count": 1239717,
+                                      "is_parallel": true,
+                                      "self": 7.759124300138225,
+                                      "children": {
+                                        "process_pixels": {
+                                          "total": 86.11678639999332,
+                                          "count": 1239717,
+                                          "is_parallel": true,
+                                          "self": 40.50604840019888,
+                                          "children": {
+                                            "image_decompress": {
+                                              "total": 45.61073799979444,
+                                              "count": 1239717,
+                                              "is_parallel": true,
+                                              "self": 45.61073799979444
+                                            }
+                                          }
+                                        }
+                                      }
+                                    }
+                                  }
+                                },
+                                "_process_rank_one_or_two_observation": {
+                                  "total": 11.96107709994112,
+                                  "count": 821240,
+                                  "is_parallel": true,
+                                  "self": 11.96107709994112
+                                }
+                              }
+                            }
+                          }
+                        }
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          }
+        },
+        "trainer_threads": {
+          "total": 3.680000008898787e-05,
+          "count": 1,
+          "self": 3.680000008898787e-05,
+          "children": {
+            "thread_root": {
+              "total": 0.0,
+              "count": 0,
+              "is_parallel": true,
+              "self": 0.0,
+              "children": {
+                "trainer_advance": {
+                  "total": 5685.509467500165,
+                  "count": 281719,
+                  "is_parallel": true,
+                  "self": 6.973894799978552,
+                  "children": {
+                    "process_trajectory": {
+                      "total": 4957.248087100189,
+                      "count": 281719,
+                      "is_parallel": true,
+                      "self": 4956.869032000189,
+                      "children": {
+                        "RLTrainer._checkpoint": {
+                          "total": 0.37905510000018694,
+                          "count": 2,
+                          "is_parallel": true,
+                          "self": 0.37905510000018694
+                        }
+                      }
+                    },
+                    "_update_policy": {
+                      "total": 721.2874855999976,
+                      "count": 144,
+                      "is_parallel": true,
+                      "self": 483.4252829999915,
+                      "children": {
+                        "TorchPPOOptimizer.update": {
+                          "total": 237.86220260000604,
+                          "count": 3456,
+                          "is_parallel": true,
+                          "self": 237.86220260000604
+                        }
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          }
+        },
+        "TrainerController._save_models": {
+          "total": 0.15891860000010638,
+          "count": 1,
+          "self": 0.005721599999560567,
+          "children": {
+            "RLTrainer._checkpoint": {
+              "total": 0.1531970000005458,
+              "count": 1,
+              "self": 0.1531970000005458
+            }
+          }
+        }
+      }
+    }
+  }
+}
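`timers.json` is the hierarchical profile ML-Agents writes at the end of a run: every node records wall-clock `total`, call `count`, and `self` time. A short sketch of walking it follows (file path assumed relative to this repository layout); for this run roughly 3147 s of the 5692 s total sits in `communicator.exchange`, i.e. waiting on the Unity environment.

```python
# Hedged sketch: print the timer tree from run_logs/timers.json.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

def walk(node, name="root", depth=0):
    # Each timer node carries total seconds and a call count; recurse into children.
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s ({node.get('count', 0)} calls)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)
```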
run_logs/training_status.json
ADDED
@@ -0,0 +1,53 @@
+{
+  "difficulty": {
+    "lesson_num": 0
+  },
+  "task": {
+    "lesson_num": 0
+  },
+  "Agent": {
+    "checkpoints": [
+      {
+        "steps": 499980,
+        "file_path": "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train\\Agent\\Agent-499980.onnx",
+        "reward": 101.18179518349935,
+        "creation_time": 1717245333.2375393,
+        "auxillary_file_paths": [
+          "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train\\Agent\\Agent-499980.pt"
+        ]
+      },
+      {
+        "steps": 999989,
+        "file_path": "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train\\Agent\\Agent-999989.onnx",
+        "reward": 98.86044054548256,
+        "creation_time": 1717247792.942624,
+        "auxillary_file_paths": [
+          "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train\\Agent\\Agent-999989.pt"
+        ]
+      },
+      {
+        "steps": 1200030,
+        "file_path": "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train\\Agent\\Agent-1200030.onnx",
+        "reward": 94.98871961802986,
+        "creation_time": 1717248802.060011,
+        "auxillary_file_paths": [
+          "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train\\Agent\\Agent-1200030.pt"
+        ]
+      }
+    ],
+    "final_checkpoint": {
+      "steps": 1200030,
+      "file_path": "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train\\Agent.onnx",
+      "reward": 94.98871961802986,
+      "creation_time": 1717248802.060011,
+      "auxillary_file_paths": [
+        "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_1_run_id_1_train\\Agent\\Agent-1200030.pt"
+      ]
+    }
+  },
+  "metadata": {
+    "stats_format_version": "0.3.0",
+    "mlagents_version": "0.30.0",
+    "torch_version": "1.7.1+cu110"
+  }
+}
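`training_status.json` lists the checkpoints kept during training (steps, mean reward at save time, file paths) and the final export. A small sketch of summarizing it, assuming the file layout uploaded here:

```python
# Hedged sketch: report checkpoint rewards from run_logs/training_status.json.
import json

with open("run_logs/training_status.json") as f:
    status = json.load(f)

for ckpt in status["Agent"]["checkpoints"]:
    print(f"step {ckpt['steps']:>8}: reward {ckpt['reward']:.2f}")

final = status["Agent"]["final_checkpoint"]
print(f"final ({final['steps']} steps): reward {final['reward']:.2f} -> {final['file_path']}")
```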