@@ -248,8 +248,6 @@ use std::fmt::{self, Display};
 use std::fs;
 use std::io::{self, Write};
 use std::path::{Component, Path, PathBuf};
-#[cfg(feature = "parallel")]
-use std::process::Child;
 use std::process::{Command, Stdio};
 use std::sync::{
     atomic::{AtomicU8, Ordering::Relaxed},
@@ -260,8 +258,10 @@ use shlex::Shlex;
 
 #[cfg(feature = "parallel")]
 mod parallel;
+
 mod target;
 use self::target::*;
+
 /// A helper module to looking for windows-specific tools:
 /// 1. On Windows host, probe the Windows Registry if needed;
 /// 2. On non-Windows host, check specified environment variables.
@@ -1755,144 +1755,17 @@ impl Build {
         Ok(objects.into_iter().map(|v| v.dst).collect())
     }
 
-    #[cfg(feature = "parallel")]
     fn compile_objects(&self, objs: &[Object]) -> Result<(), Error> {
-        use std::cell::Cell;
-
-        use parallel::async_executor::{block_on, YieldOnce};
-
         check_disabled()?;
 
-        if objs.len() <= 1 {
-            return self.compile_objects_sequential(objs);
-        }
-
-        // Limit our parallelism globally with a jobserver.
-        let mut tokens = parallel::job_token::ActiveJobTokenServer::new();
-
-        // When compiling objects in parallel we do a few dirty tricks to speed
-        // things up:
-        //
-        // * First is that we use the `jobserver` crate to limit the parallelism
-        //   of this build script. The `jobserver` crate will use a jobserver
-        //   configured by Cargo for build scripts to ensure that parallelism is
-        //   coordinated across C compilations and Rust compilations. Before we
-        //   compile anything we make sure to wait until we acquire a token.
-        //
-        //   Note that this jobserver is cached globally so we only used one per
-        //   process and only worry about creating it once.
-        //
-        // * Next we use spawn the process to actually compile objects in
-        //   parallel after we've acquired a token to perform some work
-        //
-        // With all that in mind we compile all objects in a loop here, after we
-        // acquire the appropriate tokens, Once all objects have been compiled
-        // we wait on all the processes and propagate the results of compilation.
-
-        let pendings =
-            Cell::new(Vec::<(Command, KillOnDrop, parallel::job_token::JobToken)>::new());
-        let is_disconnected = Cell::new(false);
-        let has_made_progress = Cell::new(false);
-
-        let wait_future = async {
-            let mut error = None;
-            // Buffer the stdout
-            let mut stdout = io::BufWriter::with_capacity(128, io::stdout());
-
-            loop {
-                // If the other end of the pipe is already disconnected, then we're not gonna get any new jobs,
-                // so it doesn't make sense to reuse the tokens; in fact,
-                // releasing them as soon as possible (once we know that the other end is disconnected) is beneficial.
-                // Imagine that the last file built takes an hour to finish; in this scenario,
-                // by not releasing the tokens before that last file is done we would effectively block other processes from
-                // starting sooner - even though we only need one token for that last file, not N others that were acquired.
-
-                let mut pendings_is_empty = false;
-
-                cell_update(&pendings, |mut pendings| {
-                    // Try waiting on them.
-                    pendings.retain_mut(|(cmd, child, _token)| {
-                        match try_wait_on_child(cmd, &mut child.0, &mut stdout, &mut child.1) {
-                            Ok(Some(())) => {
-                                // Task done, remove the entry
-                                has_made_progress.set(true);
-                                false
-                            }
-                            Ok(None) => true, // Task still not finished, keep the entry
-                            Err(err) => {
-                                // Task fail, remove the entry.
-                                // Since we can only return one error, log the error to make
-                                // sure users always see all the compilation failures.
-                                has_made_progress.set(true);
-
-                                if self.cargo_output.warnings {
-                                    let _ = writeln!(stdout, "cargo:warning={}", err);
-                                }
-                                error = Some(err);
-
-                                false
-                            }
-                        }
-                    });
-                    pendings_is_empty = pendings.is_empty();
-                    pendings
-                });
-
-                if pendings_is_empty && is_disconnected.get() {
-                    break if let Some(err) = error {
-                        Err(err)
-                    } else {
-                        Ok(())
-                    };
-                }
-
-                YieldOnce::default().await;
-            }
-        };
-        let spawn_future = async {
-            for obj in objs {
-                let mut cmd = self.create_compile_object_cmd(obj)?;
-                let token = tokens.acquire().await?;
-                let mut child = spawn(&mut cmd, &self.cargo_output)?;
-                let mut stderr_forwarder = StderrForwarder::new(&mut child);
-                stderr_forwarder.set_non_blocking()?;
-
-                cell_update(&pendings, |mut pendings| {
-                    pendings.push((cmd, KillOnDrop(child, stderr_forwarder), token));
-                    pendings
-                });
-
-                has_made_progress.set(true);
-            }
-            is_disconnected.set(true);
-
-            Ok::<_, Error>(())
-        };
-
-        return block_on(wait_future, spawn_future, &has_made_progress);
-
-        struct KillOnDrop(Child, StderrForwarder);
-
-        impl Drop for KillOnDrop {
-            fn drop(&mut self) {
-                let child = &mut self.0;
-
-                child.kill().ok();
-            }
-        }
-
-        fn cell_update<T, F>(cell: &Cell<T>, f: F)
-        where
-            T: Default,
-            F: FnOnce(T) -> T,
-        {
-            let old = cell.take();
-            let new = f(old);
-            cell.set(new);
+        #[cfg(feature = "parallel")]
+        if objs.len() > 1 {
+            return parallel::run_commands_in_parallel(
+                &self.cargo_output,
+                &mut objs.iter().map(|obj| self.create_compile_object_cmd(obj)),
+            );
         }
-    }
 
-    fn compile_objects_sequential(&self, objs: &[Object]) -> Result<(), Error> {
         for obj in objs {
            let mut cmd = self.create_compile_object_cmd(obj)?;
            run(&mut cmd, &self.cargo_output)?;
@@ -1901,13 +1774,6 @@ impl Build {
         Ok(())
     }
 
-    #[cfg(not(feature = "parallel"))]
-    fn compile_objects(&self, objs: &[Object]) -> Result<(), Error> {
-        check_disabled()?;
-
-        self.compile_objects_sequential(objs)
-    }
-
     fn create_compile_object_cmd(&self, obj: &Object) -> Result<Command, Error> {
         let asm_ext = AsmFileExt::from_path(&obj.src);
         let is_asm = asm_ext.is_some();
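Note: the body of parallel::run_commands_in_parallel lives in the parallel module and is not part of this diff. As a rough orientation only, a minimal spawn-then-wait helper over an iterator of commands could look like the sketch below. It uses only the standard library; the signature, error type, and error handling are simplified assumptions, and the real module additionally coordinates with the Cargo jobserver and forwards each child's stderr while polling, which this sketch omits.

use std::io;
use std::process::{Child, Command};

// Hypothetical sketch only: spawn every command up front, then wait on all of
// them, reporting the first failure. Unlike the real helper, there is no
// jobserver-based throttling and no incremental stderr forwarding here.
fn run_commands_in_parallel(
    cmds: &mut dyn Iterator<Item = io::Result<Command>>,
) -> io::Result<()> {
    // Spawn phase: start every job before waiting on any of them.
    let mut children: Vec<(Command, Child)> = Vec::new();
    for cmd in cmds {
        let mut cmd = cmd?;
        let child = cmd.spawn()?;
        children.push((cmd, child));
    }

    // Wait phase: collect exit statuses and surface the first failure.
    let mut first_error = None;
    for (cmd, mut child) in children {
        let status = child.wait()?;
        if !status.success() && first_error.is_none() {
            first_error = Some(io::Error::new(
                io::ErrorKind::Other,
                format!("{cmd:?} exited with {status}"),
            ));
        }
    }

    first_error.map_or(Ok(()), Err)
}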