Skip to content

Commit 9d24377

Browse files
committed
Merge branch 'remove-conda' of github.com:pytorch/tutorials into remove-conda
2 parents 3f98763 + 19319ae commit 9d24377

File tree

10 files changed

+383
-213
lines changed

10 files changed

+383
-213
lines changed

.ci/docker/requirements.txt

+3-3
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ tqdm==4.66.1
1414
numpy==1.24.4
1515
matplotlib
1616
librosa
17-
torch==2.6
17+
torch==2.7
1818
torchvision
1919
torchdata
2020
networkx
@@ -67,7 +67,7 @@ iopath
6767
pygame==2.6.0
6868
pycocotools
6969
semilearn==0.3.2
70-
torchao==0.5.0
70+
torchao==0.10.0
7171
segment_anything==1.0
7272
torchrec==1.1.0; platform_system == "Linux"
73-
fbgemm-gpu==1.1.0; platform_system == "Linux"
73+
fbgemm-gpu==1.2.0; platform_system == "Linux"

.jenkins/build.sh

+2-5
Original file line numberDiff line numberDiff line change
@@ -22,13 +22,10 @@ sudo apt-get install -y pandoc
2222
#Install PyTorch Nightly for test.
2323
# Nightly - pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu102/torch_nightly.html
2424
# Install 2.5 to merge all 2.4 PRs - uncomment to install nightly binaries (update the version as needed).
25-
# sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata
26-
# sudo pip3 install torch==2.6.0 torchvision --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu124
2725
# sudo pip uninstall -y fbgemm-gpu torchrec
26+
# sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata torchrl tensordict
2827
# sudo pip3 install fbgemm-gpu==1.1.0 torchrec==1.0.0 --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu124
29-
sudo pip uninstall -y torch torchvision torchaudio torchtext torchdata torchrl tensordict
30-
pip3 install torch==2.7.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu126
31-
#sudo pip uninstall -y fbgemm-gpu
28+
# pip3 install torch==2.7.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu126
3229
# Install two language tokenizers for Translation with TorchText tutorial
3330
python -m spacy download en_core_web_sm
3431
python -m spacy download de_core_news_sm

.jenkins/validate_tutorials_built.py

+3-8
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@
3131
"prototype_source/vmap_recipe",
3232
"prototype_source/torchscript_freezing",
3333
"prototype_source/nestedtensor",
34+
"prototype_source/gpu_direct_storage", # requires specific filesystem + GPUDirect Storage to be set up
3435
"recipes_source/recipes/saving_and_loading_models_for_inference",
3536
"recipes_source/recipes/saving_multiple_models_in_one_file",
3637
"recipes_source/recipes/tensorboard_with_pytorch",
@@ -51,14 +52,8 @@
5152
"intermediate_source/text_to_speech_with_torchaudio",
5253
"intermediate_source/tensorboard_profiler_tutorial", # reenable after 2.0 release.
5354
"advanced_source/semi_structured_sparse", # reenable after 3303 is fixed.
54-
"intermediate_source/mario_rl_tutorial", # reenable after 3302 is fixed
55-
"intermediate_source/reinforcement_ppo", # reenable after 3302 is fixed
56-
"intermediate_source/pinmem_nonblock", # reenable after 3302 is fixed
57-
"intermediate_source/dqn_with_rnn_tutorial", # reenable after 3302 is fixed
58-
"advanced_source/pendulum", # reenable after 3302 is fixed
59-
"advanced_source/coding_ddpg", # reenable after 3302 is fixed
60-
"intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed
61-
"recipes_source/recipes/reasoning_about_shapes" # reenable after 3326 is fixed
55+
"intermediate_source/torchrec_intro_tutorial", # reenable after 3302 is fixed
56+
"intermediate_source/memory_format_tutorial", # causes other tutorials like torch_logs to fail. "state" issue, resetting dynamo didn't help
6257
]
6358

6459
def tutorial_source_dirs() -> List[Path]:

advanced_source/sharding.rst

+5-5
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,10 @@ Requirements: - python >= 3.7
1414
We highly recommend CUDA when using torchRec. If using CUDA: - cuda >=
1515
11.0
1616

17+
.. Should these be updated?
1718
.. code:: python
1819
19-
# install conda to make installying pytorch with cudatoolkit 11.3 easier.
20+
# install conda to make installing pytorch with cudatoolkit 11.3 easier.
2021
!sudo rm Miniconda3-py37_4.9.2-Linux-x86_64.sh Miniconda3-py37_4.9.2-Linux-x86_64.sh.*
2122
!sudo wget https://repo.anaconda.com/miniconda/Miniconda3-py37_4.9.2-Linux-x86_64.sh
2223
!sudo chmod +x Miniconda3-py37_4.9.2-Linux-x86_64.sh
@@ -209,7 +210,7 @@ embedding table placement using planner and generate sharded model using
209210
)
210211
sharders = [cast(ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder())]
211212
plan: ShardingPlan = planner.collective_plan(module, sharders, pg)
212-
213+
213214
sharded_model = DistributedModelParallel(
214215
module,
215216
env=ShardingEnv.from_process_group(pg),
@@ -230,7 +231,7 @@ ranks.
230231
.. code:: python
231232
232233
import multiprocess
233-
234+
234235
def spmd_sharing_simulation(
235236
sharding_type: ShardingType = ShardingType.TABLE_WISE,
236237
world_size = 2,
@@ -250,7 +251,7 @@ ranks.
250251
)
251252
p.start()
252253
processes.append(p)
253-
254+
254255
for p in processes:
255256
p.join()
256257
assert 0 == p.exitcode
@@ -329,4 +330,3 @@ With data parallel, we will repeat the tables for all devices.
329330
330331
rank:0,sharding plan: {'': {'large_table_0': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None), 'large_table_1': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None), 'small_table_0': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None), 'small_table_1': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None)}}
331332
rank:1,sharding plan: {'': {'large_table_0': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None), 'large_table_1': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None), 'small_table_0': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None), 'small_table_1': ParameterSharding(sharding_type='data_parallel', compute_kernel='batched_dense', ranks=[0, 1], sharding_spec=None)}}
332-

0 commit comments

Comments
 (0)