Commit 693f429

Update pytorch-metadata.json
1 parent a383449 commit 693f429

2 files changed: 53 additions and 0 deletions

source/pytorch-metadata.json

Lines changed: 49 additions & 0 deletions
@@ -483,6 +483,12 @@
   {
     "name": "aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))"
   },
+  {
+    "name": "aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.) -> Tensor"
+  },
+  {
+    "name": "aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1., *, Tensor(a!) out) -> Tensor(a!)"
+  },
   {
     "name": "aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.) -> Tensor",
     "category": "Quantization"
@@ -501,6 +507,9 @@
   {
     "name": "aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)"
   },
+  {
+    "name": "aten::_get_cpu_capability() -> str"
+  },
   {
     "name": "aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool"
   },
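
aten::_get_cpu_capability presumably backs the public torch.backends.cpu.get_cpu_capability(); a quick check, assuming PyTorch 2.1 or newer:

    import torch

    # Returns a string such as "DEFAULT", "AVX2", or "AVX512" describing the
    # vector instruction set the CPU build dispatches to.
    print(torch.backends.cpu.get_cpu_capability())
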
@@ -577,6 +586,10 @@
   {
     "name": "aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor"
   },
+  {
+    "name": "aten::_safe_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor",
+    "category": "Activation"
+  },
   {
     "name": "aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0., bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)"
   },
@@ -657,6 +670,12 @@
   {
     "name": "aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)"
   },
+  {
+    "name": "aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)"
+  },
+  {
+    "name": "aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
+  },
   {
     "name": "aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)"
   },
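
The two aten::_unique overloads above are internal variants behind the public torch.unique (an assumption about the dispatch path; the signatures mirror each other). For reference:

    import torch

    x = torch.tensor([1, 3, 2, 3, 1])
    values, inverse = torch.unique(x, sorted=True, return_inverse=True)
    # values  -> tensor([1, 2, 3])
    # inverse -> tensor([0, 2, 1, 2, 0])  (index of each element of x in values)
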
@@ -2771,6 +2790,15 @@
     "name": "aten::format(str self, ...) -> str",
     "is_vararg": true
   },
+  {
+    "name": "aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)"
+  },
+  {
+    "name": "aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)"
+  },
+  {
+    "name": "aten::frexp(float a) -> (float, int)"
+  },
   {
     "name": "aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor",
     "category": "Normalization"
@@ -2968,6 +2996,9 @@
   {
     "name": "aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
   },
+  {
+    "name": "aten::greater(Tensor self, Tensor other) -> Tensor"
+  },
   {
     "name": "aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor"
   },
@@ -5151,6 +5182,9 @@
   {
     "name": "aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)"
   },
+  {
+    "name": "aten::ravel(Tensor(a) self) -> Tensor(a)"
+  },
   {
     "name": "aten::real(Tensor(a) self) -> Tensor(a)"
   },
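
The aten entries added in the two hunks above map to small public helpers: torch.greater is an alias of torch.gt, and torch.ravel returns a contiguous flattened tensor (a view when no copy is needed). For example:

    import torch

    a = torch.tensor([[1, 4], [3, 2]])
    b = torch.full((2, 2), 2)
    torch.greater(a, b)   # tensor([[False,  True], [ True, False]])
    torch.ravel(a)        # tensor([1, 4, 3, 2])
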
@@ -6794,6 +6828,9 @@
   {
     "name": "cortex_m::transpose.out(Tensor input, int[] perm, *, Tensor(a!) out) -> Tensor(a!)"
   },
+  {
+    "name": "cuda::_current_device() -> int"
+  },
   {
     "name": "detectron2::nms_rotated(Tensor boxes, Tensor scores, float iou_threshold) -> Tensor"
   },
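
cuda::_current_device presumably mirrors the public torch.cuda.current_device(), which returns the index of the currently selected GPU:

    import torch

    if torch.cuda.is_available():           # guard so the sketch runs on CPU-only builds
        print(torch.cuda.current_device())  # e.g. 0
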
@@ -6971,6 +7008,9 @@
   {
     "name": "fbgemm::rope_qkv_varseq_prefill(Tensor XQ, Tensor XK, Tensor XV, Tensor(a!) cache_K, Tensor(b!) cache_V, Tensor varseq_batch, Tensor varseq_seqpos, float theta, int? num_groups=1, Tensor? block_tables=None, int page_size=64, Tensor? varseq_cache_seqpos=None, int cache_logical_dtype_int=0, bool rope_scaling=False, int old_context_len=8192, float scaling_factor=16., float lo_freq_factor=1., float hi_freq_factor=32., Tensor? qparam_k=None, Tensor? qparam_v=None) -> Tensor"
   },
+  {
+    "name": "fbgemm::segment_sum_csr(SymInt batch_size, Tensor csr_seg, Tensor values) -> Tensor"
+  },
   {
     "name": "fbgemm::silu_mul_quantize_i8(Tensor X1, Tensor X2, float scale) -> Tensor"
   },
@@ -7742,6 +7782,9 @@
   {
     "name": "prims::collapse(Tensor a, int start, int end) -> Tensor"
   },
+  {
+    "name": "profiler::_record_function_enter(str name, str? args=None) -> Tensor"
+  },
   {
     "name": "profiler::_record_function_enter_new(str name, str? args=None) -> __torch__.torch.classes.profiler._RecordFunction"
   },
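
profiler::_record_function_enter is the older entry point behind the record_function context manager, alongside the _record_function_enter_new variant already catalogued (an assumption about the mapping). Typical usage of the public API:

    import torch
    from torch.profiler import profile, record_function

    with profile() as prof:
        with record_function("matmul_block"):   # shows up as a named range in the trace
            torch.mm(torch.randn(8, 8), torch.randn(8, 8))
    print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=5))
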
@@ -8256,6 +8299,9 @@
   {
     "name": "quantized_decomposed::quantize_per_token(Tensor input, Tensor scales, Tensor zero_points, int quant_min, int quant_max, ScalarType dtype) -> Tensor"
   },
+  {
+    "name": "sgl_kernel::extend_attention_cpu(Tensor q_extend, Tensor k_extend, Tensor v_extend, Tensor(a!) o_extend, Tensor k_buffer, Tensor v_buffer, Tensor req_to_token, Tensor req_pool_indices, Tensor seq_lens, Tensor extend_seq_lens, Tensor extend_start_loc, int max_len_extend, float sm_scale, float logit_cap) -> ()"
+  },
   {
     "name": "tensorrt::execute_engine(Tensor[] inputs, __torch__.torch.classes.tensorrt.Engine engine) -> Tensor[]"
   },
@@ -8379,6 +8425,9 @@
   {
     "name": "torchao::quantize_affine(Tensor input, SymInt[] block_size, Tensor scale, Tensor? zero_point, ScalarType output_dtype, Scalar? quant_min=None, Scalar? quant_max=None) -> Tensor"
   },
+  {
+    "name": "torchaudio::forced_align(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> (Tensor, Tensor)"
+  },
   {
     "name": "torchaudio::sox_effects_apply_effects_tensor(Tensor tensor, int sample_rate, str[][] effects, bool channels_first=True) -> (Tensor, int)"
   },
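
One way to sanity-check schema strings like the ones added in this file is to let PyTorch parse them itself via torch._C.parse_schema; this is an illustrative sketch, not part of the repository's tooling:

    import torch

    schema = torch._C.parse_schema(
        "aten::greater(Tensor self, Tensor other) -> Tensor")
    print(schema.name)                             # aten::greater
    print([arg.name for arg in schema.arguments])  # ['self', 'other']
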

tools/pytorch_script.py

Lines changed: 4 additions & 0 deletions
@@ -63,6 +63,8 @@ def _write_metadata(metadata):
         "aten::fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor",
         "aten::grid_sampler.legacy(Tensor input, Tensor grid, int interpolation_mode, int padding_mode) -> Tensor", # noqa E501
         "aten::get_num_threads() -> int",
+        "aten::greater(Tensor self, Tensor other) -> Tensor",
+        "cuda::_current_device() -> int",
         "aten::list_with_default(int[] list, int[] defaults) -> int[]",
         "aten::randint_like.generator_with_low_dtype(Tensor self, SymInt low, SymInt high, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", # noqa E501
         "aten::randint_like.generator_with_low_dtype_out(Tensor self, SymInt low, SymInt high, *, Generator? generator, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", # noqa E501
@@ -106,6 +108,7 @@ def _write_metadata(metadata):
         "fbgemm::nccl_allreduce(Tensor dst, Tensor src, Tensor? bias=None, int comm_idx=0) -> ()", # noqa E501
         "fbgemm::jagged_to_padded_dense(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value=0.) -> Tensor", # noqa E501
         "fbgemm::quantize_fp8_per_tensor(Tensor input, Tensor? bs=None, Tensor? scale_ub=None, bool stochastic_rounding=False) -> Tensor[]", # noqa E501
+        "fbgemm::segment_sum_csr(SymInt batch_size, Tensor csr_seg, Tensor values) -> Tensor",
         "fbgemm::per_tensor_dynamic_quantize_i8(Tensor X) -> (Tensor, Tensor)",
         "fbgemm::nccl_reducescatter(Tensor dst, Tensor src, int comm_idx=0) -> ()",
         "fbgemm::nccl_allgather(Tensor dst, Tensor src, int comm_idx=0) -> ()",
@@ -362,6 +365,7 @@ def _write_metadata(metadata):
         "torchaudio::sox_effects_apply_effects_tensor(Tensor tensor, int sample_rate, str[][] effects, bool channels_first=True) -> (Tensor, int)", # noqa E501
         "torchvision::_interpolate_bilinear2d_aa(Tensor input, int[] size, bool align_corners) -> Tensor", # noqa E501
         "torchvision::deform_conv2d.out(Tensor input, Tensor weight, Tensor offset, Tensor mask, Tensor bias, SymInt stride_h, SymInt stride_w, SymInt pad_h, SymInt pad_w, SymInt dilation_h, SymInt dilation_w, SymInt groups, SymInt offset_groups, bool use_mask, *, Tensor(a!) out) -> Tensor(a!)", # noqa E501
+        "sgl_kernel::extend_attention_cpu(Tensor q_extend, Tensor k_extend, Tensor v_extend, Tensor(a!) o_extend, Tensor k_buffer, Tensor v_buffer, Tensor req_to_token, Tensor req_pool_indices, Tensor seq_lens, Tensor extend_seq_lens, Tensor extend_start_loc, int max_len_extend, float sm_scale, float logit_cap) -> ()", # noqa E501
         "vai::fix_neuron(Tensor input, int valmin, int valmax, float valamp, int zero_point, int method, int device_id, int inplace) -> Tensor" # noqa E501
     ]
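
A hypothetical cross-check of schema strings like those listed in this script against the operators registered in the running PyTorch build (not part of pytorch_script.py; torch._C._jit_get_all_schemas is a private API and may change):

    import torch

    registered = [str(s) for s in torch._C._jit_get_all_schemas()]
    print(any(s.startswith("aten::greater(") for s in registered))  # True on recent builds
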
