
Commit ad873ea

Generate Python docs from pytorch/pytorch@1c5a812

1 parent e0c0c98 · commit ad873ea
File tree

10 files changed: +259 −259 lines

docs/master/_images/RReLU.png (binary, 32 Bytes)
docs/master/_modules/torch/_jit_internal.html (+8 −8)
@@ -867,7 +867,7 @@ Source code for torch._jit_internal
     "if this method is not scripted, copy the python method onto the scripted model"
 
 
-<div class="viewcode-block" id="export"><a class="viewcode-back" href="../../jit.html#torch.jit.export">[docs]</a>def export(fn):
+def export(fn):
     """
     This decorator indicates that a method on an ``nn.Module`` is used as an entry point into a
     :class:`ScriptModule` and should be compiled.
@@ -910,10 +910,10 @@ Source code for torch._jit_internal
         m = torch.jit.script(MyModule())
     """
     fn._torchscript_modifier = FunctionModifiers.EXPORT
-    return fn</div>
+    return fn
 
 
-def unused(fn):
+<div class="viewcode-block" id="unused"><a class="viewcode-back" href="../../generated/torch.jit.unused.html#torch.jit.unused">[docs]</a>def unused(fn):
     """
     This decorator indicates to the compiler that a function or method should
     be ignored and replaced with the raising of an exception. This allows you
@@ -960,7 +960,7 @@ Source code for torch._jit_internal
         return prop
 
     fn._torchscript_modifier = FunctionModifiers.UNUSED
-    return fn
+    return fn</div>
 
 # No op context manager from python side
 class _IgnoreContextManager(contextlib.AbstractContextManager):
@@ -970,7 +970,7 @@ Source code for torch._jit_internal
     def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
         pass
 
-def ignore(drop=False, **kwargs):
+<div class="viewcode-block" id="ignore"><a class="viewcode-back" href="../../generated/torch.jit.ignore.html#torch.jit.ignore">[docs]</a>def ignore(drop=False, **kwargs):
     """
     This decorator indicates to the compiler that a function or method should
     be ignored and left as a Python function. This allows you to leave code in
@@ -1061,7 +1061,7 @@ Source code for torch._jit_internal
     else:
         fn._torchscript_modifier = FunctionModifiers.IGNORE
         return fn
-    return decorator
+    return decorator</div>
 
 
 def _copy_to_script_wrapper(fn):
@@ -1359,7 +1359,7 @@ Source code for torch._jit_internal
     globals()[f"BroadcastingList{i}"] = BroadcastingList1
 
 
-<div class="viewcode-block" id="is_scripting"><a class="viewcode-back" href="../../jit_language_reference.html#torch.jit.is_scripting">[docs]</a>def is_scripting() -> bool:
+def is_scripting() -> bool:
     r"""
     Function that returns True when in compilation and False otherwise. This
     is useful especially with the @unused decorator to leave code in your
@@ -1378,7 +1378,7 @@ Source code for torch._jit_internal
             else:
                 return unsupported_linear_op(x)
     """
-    return False</div>
+    return False
 
 
 # Retrieves a fully-qualified name (module hierarchy + classname) for a given obj.

docs/master/_modules/torch/_tensor.html (+6 −6)
@@ -740,7 +740,7 @@ Source code for torch._tensor
         # All strings are unicode in Python 3.
         return torch._tensor_str._str(self)
 
-    def backward(self, gradient=None, retain_graph=None, create_graph=False, inputs=None):
+<div class="viewcode-block" id="Tensor.backward"><a class="viewcode-back" href="../../generated/torch.Tensor.backward.html#torch.Tensor.backward">[docs]</a>    def backward(self, gradient=None, retain_graph=None, create_graph=False, inputs=None):
         r"""Computes the gradient of current tensor w.r.t. graph leaves.
 
         The graph is differentiated using the chain rule. If the tensor is
@@ -796,7 +796,7 @@ Source code for torch._tensor
                 retain_graph=retain_graph,
                 create_graph=create_graph,
                 inputs=inputs)
-        torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
+        torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)</div>
 
     def register_hook(self, hook):
         r"""Registers a backward hook.
@@ -898,14 +898,14 @@ Source code for torch._tensor
          have forward mode AD gradients.
         """)
 
-<div class="viewcode-block" id="Tensor.is_shared"><a class="viewcode-back" href="../../generated/torch.Tensor.is_shared.html#torch.Tensor.is_shared">[docs]</a>    def is_shared(self):
+    def is_shared(self):
         r"""Checks if tensor is in shared memory.
 
         This is always ``True`` for CUDA tensors.
         """
         if has_torch_function_unary(self):
             return handle_torch_function(Tensor.is_shared, (self,), self)
-        return self.storage().is_shared()</div>
+        return self.storage().is_shared()
 
     def share_memory_(self):
         r"""Moves the underlying storage to shared memory.
@@ -964,7 +964,7 @@ Source code for torch._tensor
         return torch.stft(self, n_fft, hop_length, win_length, window, center,
                           pad_mode, normalized, onesided, return_complex=return_complex)
 
-<div class="viewcode-block" id="Tensor.istft"><a class="viewcode-back" href="../../generated/torch.Tensor.istft.html#torch.Tensor.istft">[docs]</a>    def istft(self, n_fft: int, hop_length: Optional[int] = None,
+    def istft(self, n_fft: int, hop_length: Optional[int] = None,
               win_length: Optional[int] = None, window: 'Optional[Tensor]' = None,
               center: bool = True, normalized: bool = False,
               onesided: Optional[bool] = None, length: Optional[int] = None,
@@ -977,7 +977,7 @@ Source code for torch._tensor
                 return_complex=return_complex
             )
         return torch.istft(self, n_fft, hop_length, win_length, window, center,
-                           normalized, onesided, length, return_complex=return_complex)</div>
+                           normalized, onesided, length, return_complex=return_complex)
 
     def resize(self, *sizes):
         if has_torch_function_unary(self):
