119
119
//! This is equivalent to the prefix sum approach described above but a little clearer to
120
120
//! understand, although slower to calculate.
121
121
use crate :: util:: parse:: * ;
122
+ use std:: sync:: atomic:: { AtomicU64 , Ordering } ;
123
+ use std:: thread;
122
124
123
/// One condition record: the raw row of `#`, `.` and `?` bytes paired with
/// the sizes of its contiguous damaged groups.
type Spring<'a> = (&'a [u8], Vec<usize>);
124
126
125
- pub fn parse ( input : & str ) -> Input < ' _ > {
127
+ pub fn parse ( input : & str ) -> Vec < Spring < ' _ > > {
126
128
input
127
129
. lines ( )
128
130
. map ( |line| {
@@ -134,15 +136,29 @@ pub fn parse(input: &str) -> Input<'_> {
134
136
. collect ( )
135
137
}
136
138
137
- pub fn part1 ( input : & Input < ' _ > ) -> u64 {
139
+ pub fn part1 ( input : & [ Spring < ' _ > ] ) -> u64 {
138
140
solve ( input, 1 )
139
141
}
140
142
141
- pub fn part2 ( input : & Input < ' _ > ) -> u64 {
142
- solve ( input, 5 )
143
+ pub fn part2 ( input : & [ Spring < ' _ > ] ) -> u64 {
144
+ // Break the work into roughly equally size batches.
145
+ let threads = thread:: available_parallelism ( ) . unwrap ( ) . get ( ) ;
146
+ let size = input. len ( ) . div_ceil ( threads) ;
147
+ let batches: Vec < _ > = input. chunks ( size) . collect ( ) ;
148
+
149
+ // Use as many cores as possible to parallelize the calculation.
150
+ let shared = AtomicU64 :: new ( 0 ) ;
151
+
152
+ thread:: scope ( |scope| {
153
+ for batch in batches {
154
+ scope. spawn ( || shared. fetch_add ( solve ( batch, 5 ) , Ordering :: Relaxed ) ) ;
155
+ }
156
+ } ) ;
157
+
158
+ shared. load ( Ordering :: Relaxed )
143
159
}
144
160
145
- pub fn solve ( input : & Input < ' _ > , repeat : usize ) -> u64 {
161
+ pub fn solve ( input : & [ Spring < ' _ > ] , repeat : usize ) -> u64 {
146
162
let mut result = 0 ;
147
163
let mut pattern = Vec :: new ( ) ;
148
164
let mut springs = Vec :: new ( ) ;
0 commit comments