@@ -281,7 +281,7 @@
 <div class="pytorch-left-menu-search">
 
   <div class="version">
-    <a href='https://pytorch.org/docs/versions.html'>main (2.7.0a0+gitdcc04e9) ▼</a>
+    <a href='https://pytorch.org/docs/versions.html'>main (2.7.0a0+git8c2aa0c) ▼</a>
   </div>
   <div id="searchBox">
     <div class="searchbox" id="googleSearchBox">
@@ -903,6 +903,24 @@
 
     try:
         ctypes.CDLL(global_deps_lib_path, mode=ctypes.RTLD_GLOBAL)
+        # Work around a slim-wheel CUDA-12.4+ dependency bug in libcusparse by preloading nvjitlink.
+        # In those CUDA versions cusparse depends on nvjitlink, but has no rpath when shipped as a
+        # wheel, so the OS picks a wrong/older copy of the nvjitlink library if `LD_LIBRARY_PATH`
+        # is defined.
+        # See https://github.com/pytorch/pytorch/issues/138460
+        if version.cuda not in ["12.4", "12.6"]:  # type: ignore[name-defined]
+            return
+        try:
+            with open("/proc/self/maps") as f:
+                _maps = f.read()
+            # libtorch_global_deps.so always depends on cudart; check if it is installed via a wheel
+            if "nvidia/cuda_runtime/lib/libcudart.so" not in _maps:
+                return
+            # If all of the above conditions are met, preload nvjitlink
+            _preload_cuda_deps("nvjitlink", "libnvJitLink.so.*[0-9]")
+        except Exception:
+            pass
+
     except OSError as err:
         # Can only happen for wheel with cuda libs as PYPI deps
         # As PyTorch is not purelib, but nvidia-*-cu12 is
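A minimal standalone sketch of the same preload logic, assuming a Linux host (it relies on `/proc/self/maps`) and the PyPI wheel layout `nvidia/<pkg>/lib/` under `sys.path`; the helper name `_maybe_preload_nvjitlink` is hypothetical and stands in for the `_preload_cuda_deps("nvjitlink", ...)` call above:

```python
import ctypes
import glob
import os
import sys


def _maybe_preload_nvjitlink() -> None:
    # /proc/self/maps only exists on Linux; bail out elsewhere.
    if not os.path.exists("/proc/self/maps"):
        return
    with open("/proc/self/maps") as f:
        maps = f.read()
    # If cudart was not loaded from the PyPI wheel layout, a system CUDA
    # install is in use and its own rpath resolves nvjitlink correctly.
    if "nvidia/cuda_runtime/lib/libcudart.so" not in maps:
        return
    # Load libnvJitLink from an installed nvidia.* wheel (layout assumed) with
    # RTLD_GLOBAL, so a later dlopen of libcusparse binds to this copy rather
    # than an older one found via LD_LIBRARY_PATH.
    for path in sys.path:
        for lib in glob.glob(
            os.path.join(path, "nvidia", "*", "lib", "libnvJitLink.so.*[0-9]")
        ):
            ctypes.CDLL(lib, mode=ctypes.RTLD_GLOBAL)
            return
```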
@@ -1735,7 +1753,7 @@
     .. note::
 
         This doesn't affect functions that create tensors that share the same memory as the input, like:
-        :func:`torch.from_numpy` and :func:`torch.frombuffer`. Using :func:`torch.Tensor.to` move tensor to desired device.
+        :func:`torch.from_numpy` and :func:`torch.frombuffer`
 
     Args:
         device (device or string): the device to set as default
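A short illustration of the note above (assuming a CUDA build; any available device works): factory functions follow the default device, while `torch.from_numpy` shares the NumPy buffer and stays on CPU, so an explicit `Tensor.to` performs the move.

```python
import numpy as np
import torch

torch.set_default_device("cuda")

print(torch.empty(2).device)   # cuda:0 -- factory functions follow the default

a = torch.from_numpy(np.zeros(2))
print(a.device)                # cpu -- shares memory with the NumPy array

b = a.to("cuda")               # explicit copy to the desired device
print(b.device)                # cuda:0
```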
@@ -2224,11 +2242,17 @@
     _check_with(RuntimeError, cond, message)
 
 
-def _check_is_size(i, message=None):
+def _check_is_size(i, message=None, *, max=None):
     """Checks that a given integer is a valid size (i.e., is non-negative).
-    You should use this over _check(i >= 0) because we can use the semantic
-    information (that i is a size) to make some further inferences in case
-    i is an unbacked SymInt.
+    You should use this over ``_check(i >= 0)`` because it can prevent
+    ``GuardOnDataDependentSymNode`` exceptions by opting you into alternate
+    semantics for ``guard_size_oblivious`` tests that treat values 0 and 1
+    equivalently to all other values.
+
+    When max is not None, this specifies an upper bound equivalent to
+    ``_check(i <= max)``. This bound is also subject to alternate semantics:
+    in ``guard_size_oblivious`` tests, we assume the max bound is treated
+    equivalently to all other values.
 
     NB: Do NOT use this in contexts where a -1 size would be valid (indicating
     to infer the size from context, or if you should wrap-around or truncate).
@@ -2240,6 +2264,13 @@
 
     _advise_is_size(i)
 
+    if max is not None:
+        _check(i <= max, message)
+
+        from torch.fx.experimental.symbolic_shapes import _advise_is_bounded
+
+        _advise_is_bounded(i, max)
+
 
 def _check_index(cond, message=None):  # noqa: F811
     r"""Throws error containing an optional message if the specified condition
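A hedged usage sketch for the new keyword (the function `pad_to_1024` and the bound 1024 are illustrative only): asserting both bounds up front lets later size arithmetic on an unbacked value, such as the result of `Tensor.item()` under `torch.compile`, avoid data-dependent guard errors, and it runs as a plain eager check otherwise.

```python
import torch
import torch.nn.functional as F


def pad_to_1024(t: torch.Tensor, n: torch.Tensor) -> torch.Tensor:
    length = n.item()  # an unbacked SymInt when traced; a plain int in eager
    torch._check_is_size(length, max=1024)  # asserts 0 <= length <= 1024
    return F.pad(t, (0, 1024 - length))


print(pad_to_1024(torch.ones(3), torch.tensor(3)).shape)  # torch.Size([1024])
```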