ledmands committed
Commit: f2b3a9d
Parent(s): 1671397

Updated README. Deleted scripts under development from the main branch; they are located in the development branch.
Files changed:
- CustomVideoRecorder/CustomVideoRecorder.py +0 -0
- CustomVideoRecorder/__init.py__ +0 -0
- README.md +2 -0
- plot_evaluations.py +0 -60
- record_video.py +0 -54
CustomVideoRecorder/CustomVideoRecorder.py
DELETED (file without changes)

CustomVideoRecorder/__init.py__
DELETED (file without changes)
README.md
CHANGED
@@ -29,10 +29,12 @@ This will pull configuration information from the specified agent and save it in
 This will record a video of a specified agent being evaluated.
 Does not save any evaluation information.
 Currently in major development.
+Currently located in development branch.
 ### plot_evaluations.py
 This will plot the evaluation data that was gathered during the training run of the specified agent using MatPlotLib.
 Charts can be saved to a directory of the user's choosing.
 Currently in major development.
+Currently located in development branch.
 ### plot_improvement.py
 This plots the score of an agent averaged over all evaluation episodes during a training run. Also plots the
 standard deviation. Removes the lowest and highest episode scores from each evaluation.
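Note: the trimmed averaging the README describes for plot_improvement.py (drop each evaluation's lowest and highest episode score, then average) can be sketched with NumPy as below. This is a minimal sketch, not the script itself; the evaluations.npz path and the "results" key are borrowed from the deleted plot_evaluations.py and are assumptions here.

import numpy as np

# Assumed layout: rows are evaluations, columns are episode scores.
data = np.load("agents/dqn_v2-7/evaluations.npz")
results = data["results"]

# Sort each row, then drop the lowest and highest score per evaluation.
trimmed = np.sort(results, axis=1)[:, 1:-1]

mean_per_eval = trimmed.mean(axis=1)  # average score per evaluation
std_per_eval = trimmed.std(axis=1)    # standard deviation per evaluation
print(mean_per_eval, std_per_eval)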
plot_evaluations.py
DELETED
@@ -1,60 +0,0 @@
-from numpy import load
-import numpy as np
-import matplotlib.pyplot as plt
-# import matplotlib.axes
-
-filepath = "agents/dqn_v2-7/evaluations.npz"
-
-data = load(filepath)
-lst = data.files  # data.files lists the keys that are available for data
-
-# print('ep_lengths: \n', data['ep_lengths'])
-
-# results and ep_lengths are 2d arrays, because each evaluation is 5 episodes long.
-# I want to plot the average of each evaluation.
-print(data["results"])
-print()
-print(np.delete(np.sort(data["results"]), 0, 1))
-# for i in range(len(data["results"])):
-#     print(np.average(data["results"][i]))
-
-'''
-# for each item in results, loop through the array and save the average
-avg_ep_result_arr = []
-for eval in data['results']:
-    result_sum = 0
-
-    for result in eval:
-        result_sum = result_sum + result
-
-    avg_ep_result = result_sum / len(eval)
-    avg_ep_result_arr.append(avg_ep_result)
-
-avg_ep_len_arr = []
-for eval in data['ep_lengths']:
-    max_len = 0
-    y_limit = 0
-    ep_len_sum = 0
-
-    for ep_length in eval:
-        ep_len_sum = ep_len_sum + ep_length
-        if ep_length > max_len:
-            max_len = ep_length
-        if ep_length > y_limit and y_limit < max_len:
-            y_limit = ep_length
-
-    avg_ep_len = ep_len_sum / len(eval)
-    avg_ep_len_arr.append(avg_ep_len)
-    y_limit = y_limit * 1.9
-
-
-x = plt.plot(data['timesteps'], avg_ep_result_arr)
-# plt.bar(data['timesteps'], avg_ep_len_arr, width=10000)
-y = plt.plot(data['timesteps'], avg_ep_len_arr)
-plt.ylim(top=y_limit)
-# plt.ylabel("Avg ep score")
-# lineObjects = plt.plot(x, y)
-plt.legend(["avg ep result", "avg ep length"])
-plt.title("result and length over steps\nfilepath: " + filepath)
-plt.show()
-'''
record_video.py
DELETED
@@ -1,54 +0,0 @@
-import gymnasium as gym
-from stable_baselines3 import DQN
-# from stable_baselines3.common.monitor import Monitor
-from stable_baselines3.common.vec_env import VecVideoRecorder, DummyVecEnv, VecEnv
-
-model_name = "agents/dqn_v2-8/best_model"  # path to model, should be an argument
-env_id = "ALE/Pacman-v5"
-video_folder = "videos/"
-video_length = 10000  # steps; by hard coding this, I can almost ensure only one episode is recorded...
-
-vec_env = DummyVecEnv([lambda: gym.make(env_id, render_mode="rgb_array")])
-model = DQN.load(model_name)
-# output: <stable_baselines3.common.vec_env.dummy_vec_env.DummyVecEnv object at 0x0000029974DC6550>
-
-# vec_env = gym.make(env_id, render_mode="rgb_array")
-# output: <OrderEnforcing<PassiveEnvChecker<AtariEnv<ALE/Pacman-v5>>>>
-
-# vec_env = Monitor(gym.make(env_id, render_mode="rgb_array"))
-
-print("\n\n\n")
-print(vec_env)
-print("\n\n\n")
-
-obs = vec_env.reset()
-
-
-# Record the video starting at the first step
-vec_env = VecVideoRecorder(vec_env,
-                           video_folder,
-                           record_video_trigger=lambda x: x == 0,
-                           video_length=video_length,
-                           name_prefix="one-episode_v2-8_bestmodel"
-                           )
-# Once I make the environment, now I need to walk through it...???
-# I want to act according to the policy that has been trained
-vec_env.reset()
-print(vec_env)
-# for _ in range(video_length + 1):
-#     action, states = model.predict(obs)
-#     obs, _, _, _ = vec_env.step(action)
-
-# Instead of using the specified steps in a for loop,
-# use a while loop to check if the episode has terminated.
-# Stop recording when the episode ends.
-end = True
-while end == True:
-    action, states = model.predict(obs)
-    obs, _, done, _ = vec_env.step(action)
-    if done == True:
-        print("exiting loop")
-        end = False
-# # Save the video
-
-vec_env.close()
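Note: the deleted script resets the environment before wrapping it and compares the vectorized env's done flag to True; done is an array with one entry per env, so indexing it is clearer. A minimal sketch of the same recording loop under the same assumptions (model path, env id, prefix taken from the deleted script), not the version in the development branch:

import gymnasium as gym
from stable_baselines3 import DQN
from stable_baselines3.common.vec_env import DummyVecEnv, VecVideoRecorder

model = DQN.load("agents/dqn_v2-8/best_model")
vec_env = DummyVecEnv([lambda: gym.make("ALE/Pacman-v5", render_mode="rgb_array")])
vec_env = VecVideoRecorder(
    vec_env,
    "videos/",
    record_video_trigger=lambda step: step == 0,  # start recording at the first step
    video_length=10_000,
    name_prefix="one-episode_v2-8_bestmodel",
)

obs = vec_env.reset()  # reset after wrapping so the recorder sees the whole episode
done = [False]
while not done[0]:  # single env, so check the first flag
    action, _states = model.predict(obs)
    obs, _rewards, done, _infos = vec_env.step(action)
vec_env.close()  # finalizes and saves the video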