{
    "help": false,
    "algo": "APPO",
    "env": "GDY-MettaGrid",
    "experiment": "p2.dr4",
    "train_dir": "/workspace/metta/train_dir",
    "restart_behavior": "resume",
    "device": "gpu",
    "seed": 0,
    "num_policies": 1,
    "async_rl": true,
    "serial_mode": false,
    "batched_sampling": false,
    "num_batches_to_accumulate": 2,
    "worker_num_splits": 2,
    "policy_workers_per_policy": 1,
    "max_policy_lag": 50,
    "num_workers": 32,
    "num_envs_per_worker": 2,
    "batch_size": 16384,
    "num_batches_per_epoch": 1,
    "num_epochs": 1,
    "rollout": 256,
    "recurrence": 256,
    "shuffle_minibatches": false,
    "gamma": 0.99,
    "reward_scale": 1.0,
    "reward_clip": 1000.0,
    "value_bootstrap": false,
    "normalize_returns": true,
    "exploration_loss_coeff": 0.008,
    "value_loss_coeff": 0.976,
    "kl_loss_coeff": 0.0,
    "aux_loss_coeff": 0.0,
    "exploration_loss": "symmetric_kl",
    "gae_lambda": 0.95,
    "ppo_clip_ratio": 0.1,
    "ppo_clip_value": 1.0,
    "with_vtrace": false,
    "vtrace_rho": 1.0,
    "vtrace_c": 1.0,
    "optimizer": "adam",
    "adam_eps": 1e-06,
    "adam_beta1": 0.9,
    "adam_beta2": 0.999,
    "max_grad_norm": 4.0,
    "learning_rate": 0.0001,
    "lr_schedule": "constant",
    "lr_schedule_kl_threshold": 0.008,
    "lr_adaptive_min": 1e-06,
    "lr_adaptive_max": 0.01,
    "obs_subtract_mean": 0.0,
    "obs_scale": 1.0,
    "normalize_input": false,
    "normalize_input_keys": null,
    "decorrelate_experience_max_seconds": 150,
    "decorrelate_envs_on_one_worker": true,
    "actor_worker_gpus": [],
    "set_workers_cpu_affinity": true,
    "force_envs_single_thread": false,
    "default_niceness": 0,
    "log_to_file": true,
    "experiment_summaries_interval": 10,
    "flush_summaries_interval": 30,
    "stats_avg": 100,
    "summaries_use_frameskip": true,
    "heartbeat_interval": 20,
    "heartbeat_reporting_interval": 180,
    "train_for_env_steps": 9999999999999,
    "train_for_seconds": 10000000000,
    "save_every_sec": 120,
    "keep_checkpoints": 2,
    "load_checkpoint_path": null,
    "init_checkpoint_path": null,
    "load_checkpoint_kind": "latest",
    "save_milestones_sec": -1,
    "save_best_every_sec": 5,
    "save_best_metric": "reward",
    "save_best_after": 100000,
    "benchmark": false,
    "encoder_mlp_layers": [
        512,
        512
    ],
    "encoder_conv_architecture": "convnet_simple",
    "encoder_conv_mlp_layers": [
        512
    ],
    "use_rnn": true,
    "rnn_size": 512,
    "rnn_type": "gru",
    "rnn_num_layers": 1,
    "decoder_mlp_layers": [],
    "nonlinearity": "elu",
    "policy_initialization": "orthogonal",
    "policy_init_gain": 1.0,
    "actor_critic_share_weights": true,
    "adaptive_stddev": true,
    "continuous_tanh_scale": 0.0,
    "initial_stddev": 1.0,
    "use_env_info_cache": false,
    "env_gpu_actions": false,
    "env_gpu_observations": true,
    "env_frameskip": 1,
    "env_framestack": 1,
    "pixel_format": "CHW",
    "use_record_episode_statistics": false,
    "episode_counter": false,
    "with_wandb": true,
    "wandb_user": "platypus",
    "wandb_project": "metta",
    "wandb_group": null,
    "wandb_job_type": "SF",
    "wandb_tags": [],
    "with_pbt": false,
    "pbt_mix_policies_in_one_env": true,
    "pbt_period_env_steps": 5000000,
    "pbt_start_mutation": 20000000,
    "pbt_replace_fraction": 0.3,
    "pbt_mutation_rate": 0.15,
    "pbt_replace_reward_gap": 0.1,
    "pbt_replace_reward_gap_absolute": 1e-06,
    "pbt_optimize_gamma": false,
    "pbt_target_objective": "true_objective",
    "pbt_perturb_min": 1.1,
    "pbt_perturb_max": 1.5,
    "env_cfg": "{\"name\": \"GDY-MettaGrid\", \"_target_\": \"env.griddly.mettagrid.gym_env.MettaGridGymEnv\", \"max_action_value\": 10, \"hidden_features\": {\"grid_obs\": [\"agent:energy\", \"agent:hp\"]}, \"game\": {\"obs_width\": 11, \"obs_height\": 11, \"max_steps\": 5000, \"tile_size\": 16, \"num_agents\": 20, \"no_energy_steps\": 500, \"objects\": {\"agent\": {\"initial_energy\": [50, 500], \"max_energy\": [200, 500], \"max_inventory\": [3, 10], \"freeze_duration\": [5, 20], \"energy_reward\": false, \"hp\": 1, \"mortal\": false, \"upkeep\": {\"time\": 0, \"shield\": [0, 3]}, \"use_cost\": 0}, \"altar\": {\"hp\": [10, 50], \"cooldown\": [2, 5], \"use_cost\": [50, 300]}, \"converter\": {\"hp\": [10, 50], \"cooldown\": [2, 5], \"energy_output\": [50, 200], \"use_cost\": 0}, \"generator\": {\"hp\": [10, 50], \"cooldown\": [3, 10], \"initial_resources\": [5, 50], \"use_cost\": 0}, \"wall\": {\"density\": 0.01, \"hp\": [3, 20]}}, \"actions\": {\"move\": {\"cost\": [0, 2]}, \"rotate\": {\"cost\": [0, 1]}, \"jump\": {\"cost\": [1, 5]}, \"shield\": {\"cost\": [0, 2]}, \"drop\": {\"cost\": [0, 2]}, \"use\": {\"cost\": [0, 2]}, \"attack\": {\"cost\": [3, 10], \"damage\": [3, 10]}}, \"map\": {\"layout\": {\"rooms_x\": 2, \"rooms_y\": 2}, \"room\": {\"width\": 20, \"height\": 20, \"num_agents\": 5, \"objects\": {\"agent\": 5, \"altar\": 1, \"converter\": 3, \"generator\": 10, \"wall\": 40}, \"border\": 1}}}, \"kinship\": {\"team_size\": [1, 5], \"team_reward\": [0.0, 1.0]}}",
    "agent_cfg": "{\"_target_\": \"agent.metta_agent.MettaAgent\", \"observation_encoders\": {\"grid_obs\": {\"feature_names\": [], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 512, \"layers\": 4}, \"global_vars\": {\"feature_names\": [], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"last_action\": {\"feature_names\": [\"last_action_id\", \"last_action_val\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"last_reward\": {\"feature_names\": [\"last_reward\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"kinship\": {\"feature_names\": [\"kinship\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}}, \"fc\": {\"layers\": 3, \"output_dim\": 512}, \"decoder\": {\"_target_\": \"agent.decoder.Decoder\"}, \"core\": {\"_target_\": \"sample_factory.model.core.ModelCoreRNN\", \"rnn_type\": \"gru\", \"rnn_num_layers\": 1, \"rnn_size\": 512}}",
    "command_line": "--aux_loss_coeff=0 --recurrence=256 --rollout=256 --value_loss_coeff=0.976 --exploration_loss=symmetric_kl --exploration_loss_coeff=0.002 --policy_initialization=orthogonal --learning_rate=0.0001 --max_policy_lag=50 --nonlinearity=elu --load_checkpoint_kind=latest --normalize_input=False --seed=0 --batch_size=16384 --decorrelate_experience_max_seconds=150 --train_for_env_steps=9999999999999 --with_wandb=True --wandb_user=platypus --wandb_project=metta --experiment=p2.dr4 --rnn_type=gru --rnn_num_layers=1 --rnn_size=512 --env=GDY-MettaGrid --env_cfg={\"name\": \"GDY-MettaGrid\", \"_target_\": \"env.griddly.mettagrid.gym_env.MettaGridGymEnv\", \"max_action_value\": 10, \"hidden_features\": {\"grid_obs\": [\"agent:energy\", \"agent:hp\"]}, \"game\": {\"obs_width\": 11, \"obs_height\": 11, \"max_steps\": 5000, \"tile_size\": 16, \"num_agents\": 20, \"no_energy_steps\": 500, \"objects\": {\"agent\": {\"initial_energy\": [50, 500], \"max_energy\": [200, 500], \"max_inventory\": [3, 10], \"freeze_duration\": [5, 20], \"energy_reward\": false, \"hp\": 1, \"mortal\": false, \"upkeep\": {\"time\": 0, \"shield\": [0, 3]}, \"use_cost\": 0}, \"altar\": {\"hp\": [10, 50], \"cooldown\": [2, 5], \"use_cost\": [50, 300]}, \"converter\": {\"hp\": [10, 50], \"cooldown\": [2, 5], \"energy_output\": [50, 200], \"use_cost\": 0}, \"generator\": {\"hp\": [10, 50], \"cooldown\": [3, 10], \"initial_resources\": [5, 50], \"use_cost\": 0}, \"wall\": {\"density\": 0.01, \"hp\": [3, 20]}}, \"actions\": {\"move\": {\"cost\": [0, 2]}, \"rotate\": {\"cost\": [0, 1]}, \"jump\": {\"cost\": [1, 5]}, \"shield\": {\"cost\": [0, 2]}, \"drop\": {\"cost\": [0, 2]}, \"use\": {\"cost\": [0, 2]}, \"attack\": {\"cost\": [3, 10], \"damage\": [3, 10]}}, \"map\": {\"layout\": {\"rooms_x\": 2, \"rooms_y\": 2}, \"room\": {\"width\": 20, \"height\": 20, \"num_agents\": 5, \"objects\": {\"agent\": 5, \"altar\": 1, \"converter\": 3, \"generator\": 10, \"wall\": 40}, \"border\": 1}}}, \"kinship\": {\"team_size\": [1, 5], \"team_reward\": [0.0, 1.0]}} --agent_cfg={\"_target_\": \"agent.metta_agent.MettaAgent\", \"observation_encoders\": {\"grid_obs\": {\"feature_names\": [], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 512, \"layers\": 4}, \"global_vars\": {\"feature_names\": [], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"last_action\": {\"feature_names\": [\"last_action_id\", \"last_action_val\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"last_reward\": {\"feature_names\": [\"last_reward\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"kinship\": {\"feature_names\": [\"kinship\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}}, \"fc\": {\"layers\": 3, \"output_dim\": 512}, \"decoder\": {\"_target_\": \"agent.decoder.Decoder\"}, \"core\": {\"_target_\": \"sample_factory.model.core.ModelCoreRNN\", \"rnn_type\": \"gru\", \"rnn_num_layers\": 1, \"rnn_size\": 512}}",
    "cli_args": {
        "env": "GDY-MettaGrid",
        "experiment": "p2.dr4",
        "seed": 0,
        "max_policy_lag": 50,
        "batch_size": 16384,
        "rollout": 256,
        "recurrence": 256,
        "exploration_loss_coeff": 0.002,
        "value_loss_coeff": 0.976,
        "aux_loss_coeff": 0.0,
        "exploration_loss": "symmetric_kl",
        "learning_rate": 0.0001,
        "normalize_input": false,
        "decorrelate_experience_max_seconds": 150,
        "train_for_env_steps": 9999999999999,
        "load_checkpoint_kind": "latest",
        "rnn_size": 512,
        "rnn_type": "gru",
        "rnn_num_layers": 1,
        "nonlinearity": "elu",
        "policy_initialization": "orthogonal",
        "with_wandb": true,
        "wandb_user": "platypus",
        "wandb_project": "metta",
        "env_cfg": "{\"name\": \"GDY-MettaGrid\", \"_target_\": \"env.griddly.mettagrid.gym_env.MettaGridGymEnv\", \"max_action_value\": 10, \"hidden_features\": {\"grid_obs\": [\"agent:energy\", \"agent:hp\"]}, \"game\": {\"obs_width\": 11, \"obs_height\": 11, \"max_steps\": 5000, \"tile_size\": 16, \"num_agents\": 20, \"no_energy_steps\": 500, \"objects\": {\"agent\": {\"initial_energy\": [50, 500], \"max_energy\": [200, 500], \"max_inventory\": [3, 10], \"freeze_duration\": [5, 20], \"energy_reward\": false, \"hp\": 1, \"mortal\": false, \"upkeep\": {\"time\": 0, \"shield\": [0, 3]}, \"use_cost\": 0}, \"altar\": {\"hp\": [10, 50], \"cooldown\": [2, 5], \"use_cost\": [50, 300]}, \"converter\": {\"hp\": [10, 50], \"cooldown\": [2, 5], \"energy_output\": [50, 200], \"use_cost\": 0}, \"generator\": {\"hp\": [10, 50], \"cooldown\": [3, 10], \"initial_resources\": [5, 50], \"use_cost\": 0}, \"wall\": {\"density\": 0.01, \"hp\": [3, 20]}}, \"actions\": {\"move\": {\"cost\": [0, 2]}, \"rotate\": {\"cost\": [0, 1]}, \"jump\": {\"cost\": [1, 5]}, \"shield\": {\"cost\": [0, 2]}, \"drop\": {\"cost\": [0, 2]}, \"use\": {\"cost\": [0, 2]}, \"attack\": {\"cost\": [3, 10], \"damage\": [3, 10]}}, \"map\": {\"layout\": {\"rooms_x\": 2, \"rooms_y\": 2}, \"room\": {\"width\": 20, \"height\": 20, \"num_agents\": 5, \"objects\": {\"agent\": 5, \"altar\": 1, \"converter\": 3, \"generator\": 10, \"wall\": 40}, \"border\": 1}}}, \"kinship\": {\"team_size\": [1, 5], \"team_reward\": [0.0, 1.0]}}",
        "agent_cfg": "{\"_target_\": \"agent.metta_agent.MettaAgent\", \"observation_encoders\": {\"grid_obs\": {\"feature_names\": [], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 512, \"layers\": 4}, \"global_vars\": {\"feature_names\": [], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"last_action\": {\"feature_names\": [\"last_action_id\", \"last_action_val\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"last_reward\": {\"feature_names\": [\"last_reward\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"kinship\": {\"feature_names\": [\"kinship\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}}, \"fc\": {\"layers\": 3, \"output_dim\": 512}, \"decoder\": {\"_target_\": \"agent.decoder.Decoder\"}, \"core\": {\"_target_\": \"sample_factory.model.core.ModelCoreRNN\", \"rnn_type\": \"gru\", \"rnn_num_layers\": 1, \"rnn_size\": 512}}"
    },
    "git_hash": "18f9e57879d0d23b14080246408ab3432c8dbe3d",
    "git_repo_name": "https://github.com/daveey/metta.git",
    "wandb_unique_id": "p2.dr4_20240617_215834_292036"
}
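
A note on reading this file: the "env_cfg" and "agent_cfg" values (and their copies under "cli_args") are themselves JSON documents stored as strings, so they need a second decoding pass. Below is a minimal sketch of loading the saved config with Python's standard json module; the filename "cfg.json" is a placeholder assumption, not something specified in the config itself.

```python
import json

# Load the saved run configuration. "cfg.json" is a placeholder path;
# point it at wherever this file is stored under train_dir/<experiment>/.
with open("cfg.json") as f:
    cfg = json.load(f)

# Top-level hyperparameters are plain JSON values.
print(cfg["learning_rate"], cfg["batch_size"], cfg["rollout"])

# env_cfg and agent_cfg are JSON-encoded strings, so decode them again.
env_cfg = json.loads(cfg["env_cfg"])
agent_cfg = json.loads(cfg["agent_cfg"])

print(env_cfg["game"]["num_agents"])   # 20
print(agent_cfg["core"]["rnn_size"])   # 512
```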