
Commit c4f6e78

Update urls for pytorch documentation
1 parent 6bf856e commit c4f6e78

14 files changed: +41 -189 lines


docs/articles/2023-06-20-introducing-onnx-script/index.md

Lines changed: 6 additions & 6 deletions
@@ -326,12 +326,12 @@ Finally, a _huge thank you_ to the wonderful engineering team at Microsoft that
 [onnx-expand-operator]: https://onnx.ai/onnx/operators/onnx__Expand.html#expand-13
 [onnxscript-pypi]: https://pypi.org/project/onnxscript
 [onnxscript-github]: https://github.com/microsoft/onnxscript
-[torch-onnx]: https://pytorch.org/docs/stable/onnx.html
-[torch-ir]: https://pytorch.org/docs/stable/ir.html
-[torch-dynamo]: https://pytorch.org/docs/stable/dynamo/index.html
-[torch-onnx-dynamoexport]: https://pytorch.org/docs/main/onnx.html#preview-torch-onnx-torchdynamo-exporter
-[torch-onnx-customops]: https://pytorch.org/docs/stable/onnx.html#onnx-script-functions
-[torch-chunk]: https://pytorch.org/docs/stable/generated/torch.chunk.html
+[torch-onnx]: https://docs.pytorch.org/stable/onnx.html
+[torch-ir]: https://docs.pytorch.org/stable/ir.html
+[torch-dynamo]: https://docs.pytorch.org/stable/dynamo/index.html
+[torch-onnx-dynamoexport]: https://docs.pytorch.org/main/onnx.html#preview-torch-onnx-torchdynamo-exporter
+[torch-onnx-customops]: https://docs.pytorch.org/stable/onnx.html#onnx-script-functions
+[torch-chunk]: https://docs.pytorch.org/stable/generated/torch.chunk.html
 [netron]: https://netron.app
 [numpy]: https://numpy.org
 [pdb]: https://docs.python.org/3/library/pdb.html

docs/conf.py

Lines changed: 1 addition & 1 deletion
@@ -92,7 +92,7 @@
     "onnx_ir": ("https://onnx.ai/ir-py/", None),
     "onnxruntime": ("https://onnxruntime.ai/docs/api/python/", None),
     "scipy": ("https://docs.scipy.org/doc/scipy/", None),
-    "torch": ("https://pytorch.org/docs/main/", None),
+    "torch": ("https://docs.pytorch.org/main/", None),
 }

 # -- Options for Sphinx Gallery ----------------------------------------------
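
For context, the `torch` entry above is part of Sphinx's `intersphinx_mapping`: each key names an external project and each value gives the documentation root from which Sphinx fetches that project's `objects.inv`, so references to objects such as `torch.Tensor` resolve against the external site. A minimal sketch of that configuration (only the relevant pieces, not the full `docs/conf.py`):

```python
# Sketch of the relevant docs/conf.py pieces; assumes sphinx.ext.intersphinx is enabled.
extensions = ["sphinx.ext.intersphinx"]

intersphinx_mapping = {
    # project name -> (docs root containing objects.inv, local inventory override or None)
    "python": ("https://docs.python.org/3/", None),
    "torch": ("https://docs.pytorch.org/main/", None),  # the root updated in this commit
}
```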

onnxscript/function_libs/torch_lib/ops/core.py

Lines changed: 4 additions & 4 deletions
@@ -243,7 +243,7 @@ def aten_addmm(
     alpha = float(alpha)
     beta = float(beta)

-    # addmm only accepts 2d tensors: https://pytorch.org/docs/stable/generated/torch.addmm.html
+    # addmm only accepts 2d tensors: https://docs.pytorch.org/stable/generated/torch.addmm.html
     return op.Gemm(mat1, mat2, self, alpha=alpha, beta=beta)

@@ -3710,7 +3710,7 @@ def aten_frac(self: TFloat) -> TFloat:
     Computes the fractional portion of each element in input.
     """

-    # https://pytorch.org/docs/stable/generated/torch.frac.html
+    # https://docs.pytorch.org/stable/generated/torch.frac.html
     return op.Sub(self, op.Mul(op.Floor(op.Abs(self)), op.Sign(self)))

@@ -6311,7 +6311,7 @@ def aten_native_layer_norm(
 ) -> Tuple[TReal, TReal, TReal]:
     """native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)"""

-    # https://pytorch.org/docs/stable/generated/torch.nn.LayerNorm.html#torch.nn.LayerNorm
+    # https://docs.pytorch.org/stable/generated/torch.nn.LayerNorm.html#torch.nn.LayerNorm
     # The mean and standard-deviation are calculated over the last D dimensions,
     # where D is the dimension of normalized_shape. For example, if normalized_shape is
     # (3, 5) (a 2-dimensional shape), the mean and standard-deviation are computed
@@ -7901,7 +7901,7 @@ def aten_slice_scatter(
     # And, 'end' also must be specified, and end-start must be equal to the size of 'src'
     # Assert(end-start == shape(src) > 0)
     # Try torch sample to get more information:
-    # https://pytorch.org/docs/master/generated/torch.slice_scatter.html?highlight=slice_scatter#torch.slice_scatter
+    # https://docs.pytorch.org/master/generated/torch.slice_scatter.html?highlight=slice_scatter#torch.slice_scatter
     # Take (torch.zeros(8, 8), torch.ones(2, 8), 0, 6, 64, 1) as example:
     # Step 1: get 1D tensor from 0 to dim_size-1, then Slice it using start, end and step.
     # We cannot use Range(start, end, step) directly as start or end may out of range.
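
For context on the `aten_frac` hunk above: it decomposes `torch.frac` into ONNX `Sub`/`Mul`/`Floor`/`Abs`/`Sign` using the identity `frac(x) = x - floor(|x|) * sign(x)`. A small NumPy sketch of the same identity (the helper name is illustrative, not part of the repository):

```python
import numpy as np

def frac_reference(x: np.ndarray) -> np.ndarray:
    # Keep the sign of the input and drop the integer part of its magnitude,
    # mirroring op.Sub(self, op.Mul(op.Floor(op.Abs(self)), op.Sign(self))).
    return x - np.floor(np.abs(x)) * np.sign(x)

print(frac_reference(np.array([2.75, -1.25, 0.0])))  # [ 0.75 -0.25  0.  ]
```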

onnxscript/function_libs/torch_lib/ops/nn.py

Lines changed: 4 additions & 4 deletions
@@ -1754,7 +1754,7 @@ def aten_scaled_dot_product_attention(
 ) -> TFloat:
     """scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor

-    Reference: https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
+    Reference: https://docs.pytorch.org/stable/generated/torch.nn.functional.scaled_dot_product_attention.html

     Equivalent to the PyTorch code::
         scale_factor = 1 / math.sqrt(Q.size(-1)) if scale is None else scale
@@ -1776,7 +1776,7 @@ def aten_scaled_dot_product_attention(
         "conversion of scaled_dot_product_attention not implemented if enable_gqa is True"
     )

-    # Reference: https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
+    # Reference: https://docs.pytorch.org/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
     if scale is None:
         scale = _attention_scale(query)
     scale = op.CastLike(scale, query)
@@ -1825,7 +1825,7 @@ def aten__scaled_dot_product_flash_attention(
     """_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)

     One of the implementations of scaled_dot_product_attention.
-    Reference: https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
+    Reference: https://docs.pytorch.org/stable/generated/torch.nn.functional.scaled_dot_product_attention.html

     NOTE: Currently, there are three implementations of nn.scaled_dot_product_attention in PyTorch due to optimization.
     However, it's the same implementation from ONNX perspective.
@@ -1964,7 +1964,7 @@ def aten_scaled_dot_product_attention_bool_mask(
 ) -> TFloat:
     """scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor

-    Reference: https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
+    Reference: https://docs.pytorch.org/stable/generated/torch.nn.functional.scaled_dot_product_attention.html

     Equivalent to the PyTorch code::
         scale_factor = 1 / math.sqrt(Q.size(-1)) if scale is None else scale
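
All of the docstrings above point at the same reference page; the math they describe is ordinary scaled dot-product attention. A simplified sketch of that reference computation (no mask, no dropout, not causal; this is not the exporter code itself, just the semantics it targets):

```python
import math
import torch

def sdpa_reference(query, key, value, scale=None):
    # Default scale is 1/sqrt(head_dim), matching the docstring's
    # `scale_factor = 1 / math.sqrt(Q.size(-1)) if scale is None else scale`.
    scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale
    attn_weight = torch.softmax(query @ key.transpose(-2, -1) * scale_factor, dim=-1)
    return attn_weight @ value

q = k = v = torch.randn(2, 4, 8, 16)    # (batch, heads, seq_len, head_dim)
print(sdpa_reference(q, k, v).shape)    # torch.Size([2, 4, 8, 16])
```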

onnxscript/function_libs/torch_lib/ops/special.py

Lines changed: 2 additions & 2 deletions
@@ -343,7 +343,7 @@ def aten_special_sinc(self: TFloat) -> TFloat:
     """special_sinc(Tensor self) -> Tensor"""

     # This computes the normalized sinc, where the input is multiplied by pi.
-    # https://pytorch.org/docs/stable/special.html#torch.special.sinc
+    # https://docs.pytorch.org/stable/special.html#torch.special.sinc
     pi_self = self * _MATH_PI

     return op.Where(self == 0.0, op.CastLike(1, self), op.Sin(pi_self) / pi_self)
@@ -365,7 +365,7 @@ def aten_special_xlog1py(self: TensorType, other: TensorType) -> TensorType:
 def aten_special_xlogy(self: TFloat, other: TFloat) -> TFloat:
     """special_xlogy(Tensor self, Tensor other) -> Tensor"""

-    # https://pytorch.org/docs/stable/special.html#torch.special.xlogy
+    # https://docs.pytorch.org/stable/special.html#torch.special.xlogy
     # out := {
     # NaN if other == NaN
     # 0 if self == 0
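
For intuition on the `special_sinc` hunk above: it computes the normalized sinc, `sin(pi*x) / (pi*x)`, with the removable singularity at `x == 0` defined as 1 via `op.Where`. A NumPy sketch of the same behaviour (NumPy's built-in `np.sinc` computes this same normalized form):

```python
import numpy as np

def sinc_reference(x: np.ndarray) -> np.ndarray:
    # Normalized sinc: sin(pi*x) / (pi*x), defined as 1 at x == 0.
    pix = np.pi * x
    denom = np.where(x == 0.0, 1.0, pix)  # avoid a divide-by-zero warning at x == 0
    return np.where(x == 0.0, 1.0, np.sin(pix) / denom)

print(sinc_reference(np.array([0.0, 0.5, 1.0])))  # approx. [1.0, 0.6366, 0.0]
```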

onnxscript/function_libs/torch_lib/tensor_typing.py

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@
 )

 # NOTE: We do not care about unsigned types beyond UINT8 because PyTorch does not us them.
-# More detail can be found: https://pytorch.org/docs/stable/tensors.html
+# More detail can be found: https://docs.pytorch.org/stable/tensors.html

 _TensorType = Union[
     BFLOAT16,

onnxscript/onnx_opset/_impl/opset12.py

Lines changed: 3 additions & 4 deletions
@@ -7,8 +7,7 @@
 # --------------------------------------------------------------------------
 # pylint: disable=W0221,W0222,R0901,W0237
 # mypy: disable-error-code=override
-# ruff: noqa: N801,E741
-# ruff: noqa: D214,D402,D405,D411,D412,D416,D417
+# ruff: noqa: D402
 # --------------------------------------------------------------------------

 from __future__ import annotations
@@ -666,7 +665,7 @@ def MaxPool(
 subset of the input tensor according to the kernel size and downsampling the
 data into the output tensor Y for further processing. The output spatial shape is calculated differently
 depending on whether explicit padding is used, where pads is employed, or auto padding is used, where auto_pad is utilized.
-With explicit padding (https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html?highlight=maxpool#torch.nn.MaxPool2d):
+With explicit padding (https://docs.pytorch.org/stable/generated/torch.nn.MaxPool2d.html?highlight=maxpool#torch.nn.MaxPool2d):
 ```
 output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)
 ```
@@ -820,7 +819,7 @@ def NegativeLogLikelihoodLoss(
 sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples.
 If "reduction" attribute is set to "sum", the output is a scalar:
 sum(loss).
-See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.
+See also https://docs.pytorch.org/stable/nn.html#torch.nn.NLLLoss.
 Example 1:
 // negative log likelihood loss, "none" reduction
 N, C, d1 = 2, 3, 2
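
The explicit-padding formula quoted in the MaxPool docstring above can be checked directly. A tiny sketch (the helper name is illustrative; `pad_total` stands for the summed padding on one axis):

```python
import math

def maxpool_out_dim(in_dim: int, pad_total: int, kernel: int, stride: int, dilation: int = 1) -> int:
    # output_spatial_shape[i] =
    #   floor((input_spatial_shape[i] + pad_shape[i] - dilation[i]*(kernel_shape[i]-1) - 1) / strides[i] + 1)
    return math.floor((in_dim + pad_total - dilation * (kernel - 1) - 1) / stride + 1)

# A 32-pixel axis, kernel 3, stride 2, one pixel of padding on each side:
print(maxpool_out_dim(32, pad_total=2, kernel=3, stride=2))  # 16
```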

onnxscript/onnx_opset/_impl/opset13.py

Lines changed: 2 additions & 3 deletions
@@ -7,8 +7,7 @@
 # --------------------------------------------------------------------------
 # pylint: disable=W0221,W0222,R0901,W0237
 # mypy: disable-error-code=override
-# ruff: noqa: N801,E741
-# ruff: noqa: D214,D402,D405,D411,D412,D416,D417
+# ruff: noqa: D214, D402, D405, D411, D416, D417
 # --------------------------------------------------------------------------

 from __future__ import annotations
@@ -2058,7 +2057,7 @@ def NegativeLogLikelihoodLoss(

 If "reduction" attribute is set to "sum", the output is a scalar: `sum(loss)`.

-See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.
+See also https://docs.pytorch.org/stable/nn.html#torch.nn.NLLLoss.

 Example 1:

onnxscript/onnx_opset/_impl/opset16.py

Lines changed: 3 additions & 69 deletions
@@ -7,8 +7,7 @@
 # --------------------------------------------------------------------------
 # pylint: disable=W0221,W0222,R0901,W0237
 # mypy: disable-error-code=override
-# ruff: noqa: N801,E741
-# ruff: noqa: D214,D402,D405,D411,D412,D416,D417
+# ruff: noqa: D214, D402, D405, D411, D416
 # --------------------------------------------------------------------------

 from __future__ import annotations
@@ -128,7 +127,7 @@ def GridSample(
 They are used to interpolate output values of `Y[N, C, H_out, W_out]`.

 The GridSample operator is often used in doing grid generator and sampler in the [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025).
-See also in [torch.nn.functional.grid_sample](https://pytorch.org/docs/master/generated/torch.nn.functional.grid_sample.html#torch-nn-functional-grid-sample).
+See also in [torch.nn.functional.grid_sample](https://docs.pytorch.org/master/generated/torch.nn.functional.grid_sample.html#torch-nn-functional-grid-sample).


 Args:
@@ -252,72 +251,7 @@ def Identity(self, input: V_Identity) -> V_Identity:

 B_If: TypeAlias = BOOL

-V_If: TypeAlias = Union[
-    Optional[Sequence[BFLOAT16]],
-    Optional[Sequence[BOOL]],
-    Optional[Sequence[COMPLEX128]],
-    Optional[Sequence[COMPLEX64]],
-    Optional[Sequence[DOUBLE]],
-    Optional[Sequence[FLOAT]],
-    Optional[Sequence[FLOAT16]],
-    Optional[Sequence[INT16]],
-    Optional[Sequence[INT32]],
-    Optional[Sequence[INT64]],
-    Optional[Sequence[INT8]],
-    Optional[Sequence[STRING]],
-    Optional[Sequence[UINT16]],
-    Optional[Sequence[UINT32]],
-    Optional[Sequence[UINT64]],
-    Optional[Sequence[UINT8]],
-    Optional[BFLOAT16],
-    Optional[BOOL],
-    Optional[COMPLEX128],
-    Optional[COMPLEX64],
-    Optional[DOUBLE],
-    Optional[FLOAT],
-    Optional[FLOAT16],
-    Optional[INT16],
-    Optional[INT32],
-    Optional[INT64],
-    Optional[INT8],
-    Optional[STRING],
-    Optional[UINT16],
-    Optional[UINT32],
-    Optional[UINT64],
-    Optional[UINT8],
-    Sequence[BFLOAT16],
-    Sequence[BOOL],
-    Sequence[COMPLEX128],
-    Sequence[COMPLEX64],
-    Sequence[DOUBLE],
-    Sequence[FLOAT],
-    Sequence[FLOAT16],
-    Sequence[INT16],
-    Sequence[INT32],
-    Sequence[INT64],
-    Sequence[INT8],
-    Sequence[STRING],
-    Sequence[UINT16],
-    Sequence[UINT32],
-    Sequence[UINT64],
-    Sequence[UINT8],
-    BFLOAT16,
-    BOOL,
-    COMPLEX128,
-    COMPLEX64,
-    DOUBLE,
-    FLOAT,
-    FLOAT16,
-    INT16,
-    INT32,
-    INT64,
-    INT8,
-    STRING,
-    UINT16,
-    UINT32,
-    UINT64,
-    UINT8,
-]
+V_If: TypeAlias = Union[None, Sequence[BFLOAT16], Sequence[BOOL], Sequence[COMPLEX128], Sequence[COMPLEX64], Sequence[DOUBLE], Sequence[FLOAT], Sequence[FLOAT16], Sequence[INT16], Sequence[INT32], Sequence[INT64], Sequence[INT8], Sequence[STRING], Sequence[UINT16], Sequence[UINT32], Sequence[UINT64], Sequence[UINT8], BFLOAT16, BOOL, COMPLEX128, COMPLEX64, DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, STRING, UINT16, UINT32, UINT64, UINT8]

 def If(self, cond: B_If, *, else_branch: GraphProto, then_branch: GraphProto) -> V_If:
     r"""[🌐 If(16)](https://onnx.ai/onnx/operators/onnx__If.html#if-16 "Online Documentation")

onnxscript/onnx_opset/_impl/opset18.py

Lines changed: 2 additions & 3 deletions
@@ -7,8 +7,7 @@
 # --------------------------------------------------------------------------
 # pylint: disable=W0221,W0222,R0901,W0237
 # mypy: disable-error-code=override
-# ruff: noqa: N801,E741
-# ruff: noqa: D214,D402,D405,D411,D412,D416,D417
+# ruff: noqa: D402, D405
 # --------------------------------------------------------------------------

 from __future__ import annotations
@@ -235,7 +234,7 @@ def Col2Im(

 The operator rearranges column blocks back into a multidimensional image

-Col2Im behaves similarly to PyTorch's fold https://pytorch.org/docs/stable/generated/torch.nn.Fold.html,
+Col2Im behaves similarly to PyTorch's fold https://docs.pytorch.org/stable/generated/torch.nn.Fold.html,
 but it only supports *batched* multi-dimensional image tensors.
 Another implementation in Python with N-dimension support can be found at https://github.com/f-dangel/unfoldNd/.

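
As the Col2Im docstring notes, the PyTorch counterpart is `torch.nn.Fold`, which rearranges column blocks back into a (batched) image, summing overlapping values. A minimal sketch:

```python
import torch

# Fold 4 column blocks of a 3x3 kernel (stride 1, no padding) back into a 4x4 image.
fold = torch.nn.Fold(output_size=(4, 4), kernel_size=(3, 3))
cols = torch.ones(1, 1 * 3 * 3, 4)    # (N, C * prod(kernel_size), num_blocks)
print(fold(cols).shape)               # torch.Size([1, 1, 4, 4])
```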
