4 changes: 2 additions & 2 deletions lib/node_modules/@stdlib/repl/code-blocks/data/data.csv
@@ -1876,8 +1876,8 @@ base.strided.dnansumpw,"var x = new Float64Array( [ 1.0, -2.0, NaN, 2.0 ] );\nba
base.strided.dnansumpw.ndarray,"var x = new Float64Array( [ 1.0, -2.0, NaN, 2.0 ] );\nbase.strided.dnansumpw.ndarray( x.length, x, 1, 0 )\nvar x = new Float64Array( [ 1.0, -2.0, 3.0, 2.0, 5.0, -1.0, NaN, NaN ] );\nbase.strided.dnansumpw.ndarray( 4, x, 2, 1 )\n"
base.strided.dnanvariance,"var x = new Float64Array( [ 1.0, -2.0, NaN, 2.0 ] );\nbase.strided.dnanvariance( x.length, 1, x, 1 )\nx = new Float64Array( [ -2.0, 1.0, 1.0, -5.0, 2.0, -1.0 ] );\nvar N = base.floor( x.length / 2 );\nvar stride = 2;\nbase.strided.dnanvariance( N, 1, x, stride )\nvar x0 = new Float64Array( [ 1.0, -2.0, 3.0, 2.0, 5.0, -1.0 ] );\nvar x1 = new Float64Array( x0.buffer, x0.BYTES_PER_ELEMENT*1 );\nN = base.floor( x0.length / 2 );\nstride = 2;\nbase.strided.dnanvariance( N, 1, x1, stride )\n"
base.strided.dnanvariance.ndarray,"var x = new Float64Array( [ 1.0, -2.0, NaN, 2.0 ] );\nbase.strided.dnanvariance.ndarray( x.length, 1, x, 1, 0 )\nvar x = new Float64Array( [ 1.0, -2.0, 3.0, 2.0, 5.0, -1.0 ] );\nvar N = base.floor( x.length / 2 );\nbase.strided.dnanvariance.ndarray( N, 1, x, 2, 1 )\n"
base.strided.dnanvariancech,"var x = new Float64Array( [ 1.0, -2.0, NaN, 2.0 ] );\nbase.strided.dnanvariancech( x.length, 1, x, 1 )\nx = new Float64Array( [ -2.0, 1.0, 1.0, -5.0, 2.0, -1.0 ] );\nvar N = base.floor( x.length / 2 );\nvar stride = 2;\nbase.strided.dnanvariancech( N, 1, x, stride )\nvar x0 = new Float64Array( [ 1.0, -2.0, 3.0, 2.0, 5.0, -1.0 ] );\nvar x1 = new Float64Array( x0.buffer, x0.BYTES_PER_ELEMENT*1 );\nN = base.floor( x0.length / 2 );\nstride = 2;\nbase.strided.dnanvariancech( N, 1, x1, stride )\n"
base.strided.dnanvariancech.ndarray,"var x = new Float64Array( [ 1.0, -2.0, NaN, 2.0 ] );\nbase.strided.dnanvariancech.ndarray( x.length, 1, x, 1, 0 )\nvar x = new Float64Array( [ 1.0, -2.0, 3.0, 2.0, 5.0, -1.0 ] );\nvar N = base.floor( x.length / 2 );\nbase.strided.dnanvariancech.ndarray( N, 1, x, 2, 1 )\n"
base.strided.dnanvariancech,"var x = new Float64Array( [ 1.0, NaN, -2.0, 2.0 ] );\nbase.strided.dnanvariancech( x.length, 1, x, 1 )\nx = new Float64Array( [ -2.0, 1.0, 1.0, -5.0, 2.0, -1.0, NaN, NaN ] );\nbase.strided.dnanvariancech( 4, 1, x, 2 )\nvar x0 = new Float64Array( [ 1.0, -2.0, 3.0, 2.0, 5.0, -1.0, NaN, NaN ] );\nvar x1 = new Float64Array( x0.buffer, x0.BYTES_PER_ELEMENT*1 );\nbase.strided.dnanvariancech( 4, 1, x1, 2 )\n"
base.strided.dnanvariancech.ndarray,"var x = new Float64Array( [ 1.0, -2.0, NaN, 2.0 ] );\nbase.strided.dnanvariancech.ndarray( x.length, 1, x, 1, 0 )\nvar x = new Float64Array( [ 1.0, -2.0, 3.0, 2.0, 5.0, -1.0, NaN, NaN ] );\nbase.strided.dnanvariancech.ndarray( 4, 1, x, 2, 1 )\n"
base.strided.dnanvariancepn,"var x = new Float64Array( [ 1.0, -2.0, NaN, 2.0 ] );\nbase.strided.dnanvariancepn( x.length, 1, x, 1 )\nx = new Float64Array( [ -2.0, 1.0, 1.0, -5.0, 2.0, -1.0, NaN, NaN ] );\nbase.strided.dnanvariancepn( 4, 1, x, 2 )\nvar x0 = new Float64Array( [ 1.0, -2.0, 3.0, 2.0, 5.0, -1.0, NaN, NaN ] );\nvar x1 = new Float64Array( x0.buffer, x0.BYTES_PER_ELEMENT*1 );\nbase.strided.dnanvariancepn( 4, 1, x1, 2 )\n"
base.strided.dnanvariancepn.ndarray,"var x = new Float64Array( [ 1.0, -2.0, NaN, 2.0 ] );\nbase.strided.dnanvariancepn.ndarray( x.length, 1, x, 1, 0 )\nvar x = new Float64Array( [ 1.0, -2.0, 3.0, 2.0, 5.0, -1.0, NaN, NaN ] );\nbase.strided.dnanvariancepn.ndarray( 4, 1, x, 2, 1 )\n"
base.strided.dnanvariancetk,"var x = new Float64Array( [ 1.0, -2.0, NaN, 2.0 ] );\nbase.strided.dnanvariancetk( x.length, 1, x, 1 )\nx = new Float64Array( [ -2.0, 1.0, 1.0, -5.0, 2.0, -1.0, NaN, NaN ] );\nbase.strided.dnanvariancetk( 4, 1, x, 2 )\nvar x0 = new Float64Array( [ 1.0, -2.0, 3.0, 2.0, 5.0, -1.0, NaN, NaN ] );\nvar x1 = new Float64Array( x0.buffer, x0.BYTES_PER_ELEMENT*1 );\nbase.strided.dnanvariancetk( 4, 1, x1, 2 )\n"
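For reference, the updated `base.strided.dnanvariancech` code block above reads as follows when unescaped and entered in the stdlib REPL (where the `base.strided` namespace is preloaded). The annotated expected results are a sketch added for clarity and are not part of the diff:

```js
// Variance of the non-NaN elements, using a correction term of 1 (sample variance):
var x = new Float64Array( [ 1.0, NaN, -2.0, 2.0 ] );
base.strided.dnanvariancech( x.length, 1, x, 1 );
// => variance of [ 1.0, -2.0, 2.0 ]

// Every other element (strideX = 2), ignoring NaN values:
x = new Float64Array( [ -2.0, 1.0, 1.0, -5.0, 2.0, -1.0, NaN, NaN ] );
base.strided.dnanvariancech( 4, 1, x, 2 );
// => variance of [ -2.0, 1.0, 2.0 ]

// Starting at the second element, via a typed array view over the same buffer:
var x0 = new Float64Array( [ 1.0, -2.0, 3.0, 2.0, 5.0, -1.0, NaN, NaN ] );
var x1 = new Float64Array( x0.buffer, x0.BYTES_PER_ELEMENT*1 );
base.strided.dnanvariancech( 4, 1, x1, 2 );
// => variance of [ -2.0, 2.0, -1.0 ]
```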
2 changes: 1 addition & 1 deletion lib/node_modules/@stdlib/repl/code-blocks/data/data.json

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion lib/node_modules/@stdlib/repl/data/contributor.json
@@ -1 +1 @@
["Aadish Jain","Aayush Khanna","Abhijit Raut","Adarsh Palaskar","Aditya Sapra","Ahmed Atwa","Ahmed Kashkoush","Ahmed Khaled","Aleksandr","Ali Salesi","Aman Bhansali","Amit Jimiwal","Anudeep Sanapala","Athan Reines","Ayaka","Brendan Graetz","Bruno Fenzl","Bryan Elee","Chinmay Joshi","Christopher Dambamuromo","Dan Rose","Daniel Killenberger","Daniel Yu","Debashis Maharana","Desh Deepak Kant","Dev Goel","Dhruv Arvind Singh","Divyansh Seth","Dominic Lim","Dominik Moritz","Dorrin Sotoudeh","EuniceSim142","Frank Kovacs","GK Bishnoi","Gaurav","Golden Kumar","Gunj Joshi","Gururaj Gurram","Harsh","HarshaNP","Harshita Kalani","Hridyanshu","Jaimin Godhani","James Gelok","Jaysukh Makvana","Jenish Thapa","Jithin KS","Joel Mathew Koshy","Joey Reed","Jordan Gallivan","Joris Labie","Justin Dennison","Karan Anand","Karthik Prakash","Kohantika Nath","Krishnendu Das","Kshitij-Dale","Lovelin Dhoni J B","Manik Sharma","Manvith M","Marcus Fantham","Matt Cochrane","Mihir Pandit","Milan Raj","Mohammad Bin Aftab","Mohammad Kaif","Momtchil Momtchev","Muhammad Haris","Naresh Jagadeesan","Naveen Kumar","Neeraj Pathak","Nishant Shinde","Nithin Katta","Nourhan Hasan","Ognjen Jevremović","Oneday12323","Ori Miles","Philipp Burckhardt","Prajwal Kulkarni","Pranav Goswami","Pranjal Jha","Prashant Kumar Yadav","Pratik Singh","Pratyush Kumar Chouhan","Priyansh Prajapati","Priyanshu Agarwal","Pushpendra Chandravanshi","Raunak Kumar Gupta","Rejoan Sardar","Ricky Reusser","Ridam Garg","Rishav","Rishav Tarway","Robert Gislason","Roman Stetsyk","Rutam Kathale","Ruthwik Chikoti","Ryan Seal","Rylan Yang","Sai Srikar Dumpeti","Sarthak Paandey","Saurabh Singh","Seyyed Parsa Neshaei","Shabareesh Shetty","Shashank Shekhar Singh","Shivam Ahir","Shraddheya Shendre","Shubh Mehta","Shubham Mishra","Sivam Das","Snehil Shah","Soumajit Chatterjee","Spandan Barve","Stephannie Jiménez Gacha","Suhaib Ilahi","Suraj Kumar","Tirtadwipa Manunggal","Tudor Pagu","Tufailahmed Bargir","Utkarsh","Utkarsh Raj","UtkershBasnet","Vaibhav Patel","Varad Gupta","Vinit Pandit","Vivek Maurya","Xiaochuan Ye","Yaswanth Kosuru","Yernar Yergaziyev","Yuvi Mittal","ekambains","olenkabilonizhka","pranav-1720","rainn","rei2hu"]
["Aadish Jain","Aayush Khanna","Abhijit Raut","Abhishek Jain","Adarsh Palaskar","Aditya Sapra","Ahmed Atwa","Ahmed Kashkoush","Ahmed Khaled","Aleksandr","Ali Salesi","Aman Bhansali","Amit Jimiwal","Anudeep Sanapala","Athan Reines","Ayaka","Brendan Graetz","Bruno Fenzl","Bryan Elee","Chinmay Joshi","Christopher Dambamuromo","Dan Rose","Daniel Killenberger","Daniel Yu","Debashis Maharana","Desh Deepak Kant","Dev Goel","Dhruv Arvind Singh","Divyansh Seth","Dominic Lim","Dominik Moritz","Dorrin Sotoudeh","EuniceSim142","Frank Kovacs","GK Bishnoi","Gaurav","Golden Kumar","Gunj Joshi","Gururaj Gurram","Harsh","HarshaNP","Harshita Kalani","Hridyanshu","Jaimin Godhani","James Gelok","Jaysukh Makvana","Jenish Thapa","Jithin KS","Joel Mathew Koshy","Joey Reed","Jordan Gallivan","Joris Labie","Justin Dennison","Karan Anand","Karthik Prakash","Kohantika Nath","Krishnam Agarwal","Krishnendu Das","Kshitij-Dale","Lovelin Dhoni J B","Manik Sharma","Manvith M","Marcus Fantham","Matt Cochrane","Mihir Pandit","Milan Raj","Mohammad Bin Aftab","Mohammad Kaif","Momtchil Momtchev","Muhammad Haris","Naresh Jagadeesan","Naveen Kumar","Neeraj Pathak","Nishant Shinde","Nithin Katta","Nourhan Hasan","Ognjen Jevremović","Oneday12323","Ori Miles","Philipp Burckhardt","Prajwal Kulkarni","Pranav Goswami","Pranjal Jha","Prashant Kumar Yadav","Pratik Singh","Pratyush Kumar Chouhan","Priyansh Prajapati","Priyanshu Agarwal","Pushpendra Chandravanshi","Raunak Kumar Gupta","Rejoan Sardar","Ricky Reusser","Ridam Garg","Rishav","Rishav Tarway","Robert Gislason","Roman Stetsyk","Rutam Kathale","Ruthwik Chikoti","Ryan Seal","Rylan Yang","Sai Srikar Dumpeti","Sarthak Paandey","Saurabh Singh","Seyyed Parsa Neshaei","Shabareesh Shetty","Shashank Shekhar Singh","Shivam Ahir","Shraddheya Shendre","Shubh Mehta","Shubham Mishra","Sivam Das","Snehil Shah","Soumajit Chatterjee","Spandan Barve","Stephannie Jiménez Gacha","Suhaib Ilahi","Suraj Kumar","Tirtadwipa Manunggal","Tudor Pagu","Tufailahmed Bargir","Utkarsh","Utkarsh Raj","UtkershBasnet","Vaibhav Patel","Varad Gupta","Vinit Pandit","Vivek Maurya","Xiaochuan Ye","Yaswanth Kosuru","Yernar Yergaziyev","Yuvi Mittal","ekambains","olenkabilonizhka","pranav-1720","rainn","rei2hu"]
10 changes: 5 additions & 5 deletions lib/node_modules/@stdlib/repl/help/data/data.csv

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion lib/node_modules/@stdlib/repl/help/data/data.json

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions lib/node_modules/@stdlib/repl/info/data/data.csv
@@ -1876,8 +1876,8 @@ base.strided.dnansumpw,"\nbase.strided.dnansumpw( N:integer, x:Float64Array, str
base.strided.dnansumpw.ndarray,"\nbase.strided.dnansumpw.ndarray( N:integer, x:Float64Array, strideX:integer, \n offsetX:integer )\n Computes the sum of double-precision floating-point strided array elements,\n ignoring `NaN` values and using pairwise summation and alternative indexing\n semantics.\n"
base.strided.dnanvariance,"\nbase.strided.dnanvariance( N:integer, correction:number, x:Float64Array, \n stride:integer )\n Computes the variance of a double-precision floating-point strided array\n ignoring `NaN` values.\n"
base.strided.dnanvariance.ndarray,"\nbase.strided.dnanvariance.ndarray( N:integer, correction:number, \n x:Float64Array, stride:integer, offset:integer )\n Computes the variance of a double-precision floating-point strided array\n ignoring `NaN` values and using alternative indexing semantics.\n"
base.strided.dnanvariancech,"\nbase.strided.dnanvariancech( N:integer, correction:number, x:Float64Array, \n stride:integer )\n Computes the variance of a double-precision floating-point strided array\n ignoring `NaN` values and using a one-pass trial mean algorithm.\n"
base.strided.dnanvariancech.ndarray,"\nbase.strided.dnanvariancech.ndarray( N:integer, correction:number, \n x:Float64Array, stride:integer, offset:integer )\n Computes the variance of a double-precision floating-point strided array\n ignoring `NaN` values and using a one-pass trial mean algorithm and\n alternative indexing semantics.\n"
base.strided.dnanvariancech,"\nbase.strided.dnanvariancech( N:integer, correction:number, x:Float64Array, \n strideX:integer )\n Computes the variance of a double-precision floating-point strided array\n ignoring `NaN` values and using a one-pass trial mean algorithm.\n"
base.strided.dnanvariancech.ndarray,"\nbase.strided.dnanvariancech.ndarray( N:integer, correction:number, \n x:Float64Array, strideX:integer, offsetX:integer )\n Computes the variance of a double-precision floating-point strided array\n ignoring `NaN` values and using a one-pass trial mean algorithm and\n alternative indexing semantics.\n"
base.strided.dnanvariancepn,"\nbase.strided.dnanvariancepn( N:integer, correction:number, x:Float64Array, \n strideX:integer )\n Computes the variance of a double-precision floating-point strided array\n ignoring `NaN` values and using a two-pass algorithm.\n"
base.strided.dnanvariancepn.ndarray,"\nbase.strided.dnanvariancepn.ndarray( N:integer, correction:number, \n x:Float64Array, strideX:integer, offsetX:integer )\n Computes the variance of a double-precision floating-point strided array\n ignoring `NaN` values and using a two-pass algorithm and alternative\n indexing semantics.\n"
base.strided.dnanvariancetk,"\nbase.strided.dnanvariancetk( N:integer, correction:number, x:Float64Array, \n strideX:integer )\n Computes the variance of a double-precision floating-point strided array\n ignoring `NaN` values and using a one-pass textbook algorithm.\n"
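As the help entries above indicate, the `.ndarray` variant differs from the main interface only in its "alternative indexing semantics": the starting index is supplied as an explicit `offsetX` argument rather than being encoded in a typed array view over the underlying buffer. A minimal REPL-style sketch of the equivalence (the result comments are illustrative, not part of the diff):

```js
var x0 = new Float64Array( [ 1.0, -2.0, 3.0, 2.0, 5.0, -1.0, NaN, NaN ] );

// Main interface: start at index 1 by constructing a view over the buffer:
var x1 = new Float64Array( x0.buffer, x0.BYTES_PER_ELEMENT*1 );
var v1 = base.strided.dnanvariancech( 4, 1, x1, 2 );

// ndarray interface: start at index 1 by passing `offsetX` directly:
var v2 = base.strided.dnanvariancech.ndarray( 4, 1, x0, 2, 1 );

// Both compute the variance of [ -2.0, 2.0, -1.0 ], ignoring the trailing NaN:
// v1 === v2 => true
```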
2 changes: 1 addition & 1 deletion lib/node_modules/@stdlib/repl/info/data/data.json

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions lib/node_modules/@stdlib/repl/signature/data/data.csv
@@ -1877,8 +1877,8 @@ base.strided.dnansumpw,"base.strided.dnansumpw( N, x, strideX )"
base.strided.dnansumpw.ndarray,"base.strided.dnansumpw.ndarray( N, x, strideX, offsetX )"
base.strided.dnanvariance,"base.strided.dnanvariance( N, correction, x, stride )"
base.strided.dnanvariance.ndarray,"base.strided.dnanvariance.ndarray( N, correction, x, stride, offset )"
base.strided.dnanvariancech,"base.strided.dnanvariancech( N, correction, x, stride )"
base.strided.dnanvariancech.ndarray,"base.strided.dnanvariancech.ndarray( N, correction, x, stride, offset )"
base.strided.dnanvariancech,"base.strided.dnanvariancech( N, correction, x, strideX )"
base.strided.dnanvariancech.ndarray,"base.strided.dnanvariancech.ndarray( N, correction, x, strideX, offsetX )"
base.strided.dnanvariancepn,"base.strided.dnanvariancepn( N, correction, x, strideX )"
base.strided.dnanvariancepn.ndarray,"base.strided.dnanvariancepn.ndarray( N, correction, x, strideX, offsetX )"
base.strided.dnanvariancetk,"base.strided.dnanvariancetk( N, correction, x, strideX )"
2 changes: 1 addition & 1 deletion lib/node_modules/@stdlib/repl/signature/data/data.json

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions lib/node_modules/@stdlib/repl/typed-signature/data/data.csv
@@ -1877,8 +1877,8 @@ base.strided.dnansumpw,"base.strided.dnansumpw( N:integer, x:Float64Array, strid
base.strided.dnansumpw.ndarray,"base.strided.dnansumpw.ndarray( N:integer, x:Float64Array, strideX:integer, offsetX:integer )"
base.strided.dnanvariance,"base.strided.dnanvariance( N:integer, correction:number, x:Float64Array, stride:integer )"
base.strided.dnanvariance.ndarray,"base.strided.dnanvariance.ndarray( N:integer, correction:number, x:Float64Array, stride:integer, offset:integer )"
base.strided.dnanvariancech,"base.strided.dnanvariancech( N:integer, correction:number, x:Float64Array, stride:integer )"
base.strided.dnanvariancech.ndarray,"base.strided.dnanvariancech.ndarray( N:integer, correction:number, x:Float64Array, stride:integer, offset:integer )"
base.strided.dnanvariancech,"base.strided.dnanvariancech( N:integer, correction:number, x:Float64Array, strideX:integer )"
base.strided.dnanvariancech.ndarray,"base.strided.dnanvariancech.ndarray( N:integer, correction:number, x:Float64Array, strideX:integer, offsetX:integer )"
base.strided.dnanvariancepn,"base.strided.dnanvariancepn( N:integer, correction:number, x:Float64Array, strideX:integer )"
base.strided.dnanvariancepn.ndarray,"base.strided.dnanvariancepn.ndarray( N:integer, correction:number, x:Float64Array, strideX:integer, offsetX:integer )"
base.strided.dnanvariancetk,"base.strided.dnanvariancetk( N:integer, correction:number, x:Float64Array, strideX:integer )"

Large diffs are not rendered by default.
