File tree: 1 file changed (+15 −6 lines changed)
Original file line number | Diff line number | Diff line change 63
63
# %%bash
64
64
# pip3 install torchrl mujoco glfw
65
65
66
- import torchrl
67
- import torch
68
- import tqdm
69
- from typing import Tuple
70
-
71
66
# sphinx_gallery_start_ignore
72
67
import warnings
73
68
warnings .filterwarnings ("ignore" )
69
+ import multiprocessing
70
+ # TorchRL prefers the spawn method, which restricts creation of ``~torchrl.envs.ParallelEnv`` inside
71
+ # a `__main__` method call, but for ease of reading the code switches to fork
72
+ # which is also the default start method in Google's Colaboratory
73
+ try :
74
+ multiprocessing .set_start_method ("fork" )
75
+ except RuntimeError :
76
+ assert multiprocessing .get_start_method () == "fork"
74
77
# sphinx_gallery_end_ignore
75
78
79
+
80
+ import torchrl
81
+ import torch
82
+ import tqdm
83
+ from typing import Tuple
84
+
76
85
###############################################################################
77
86
# We will execute the policy on CUDA if available
78
87
device = torch .device ("cuda:0" if torch .cuda .is_available () else "cpu" )
@@ -1219,6 +1228,6 @@ def ceil_div(x, y):
1219
1228
#
1220
1229
# To iterate further on this loss module we might consider:
1221
1230
#
1222
- # - Using `@dispatch` (see `[Feature] Distpatch IQL loss module <https://github.com/pytorch/rl/pull/1230>`_.
1231
+ # - Using `@dispatch` (see `[Feature] Dispatch IQL loss module <https://github.com/pytorch/rl/pull/1230>`_.)
1223
1232
# - Allowing flexible TensorDict keys.
1224
1233
#
You can’t perform that action at this time.
0 commit comments