|
34 | 34 | use codec::{Decode, Encode}; |
35 | 35 | use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; |
36 | 36 | use cumulus_client_consensus_common::{ |
| 37 | + self as consensus_common, |
37 | 38 | ParachainBlockImportMarker, ParachainCandidate, ParentSearchParams, |
38 | 39 | }; |
39 | 40 | use cumulus_client_consensus_proposer::ProposerInterface; |
@@ -89,6 +90,7 @@ pub struct Params<BI, CIDP, Client, RClient, SO, Proposer, CS> { |
89 | 90 | pub relay_chain_slot_duration: SlotDuration, |
90 | 91 | pub proposer: Proposer, |
91 | 92 | pub collator_service: CS, |
| 93 | + pub authoring_duration: Duration, |
92 | 94 | } |
93 | 95 |
|
94 | 96 | /// Run async-backing-friendly Aura. |
@@ -149,7 +151,28 @@ pub async fn run<Block, P, BI, CIDP, Client, RClient, SO, Proposer, CS>( |
149 | 151 | let relay_parent = relay_parent_header.hash(); |
150 | 152 |
|
151 | 153 | // TODO [now]: get asynchronous backing parameters from the relay-chain |
152 | | - // runtime. why? |
| 154 | + // runtime. why? for the parent search parameters. |
| 155 | + |
| 156 | + let max_pov_size = match params.relay_client.persisted_validation_data( |
| 157 | + relay_parent, |
| 158 | + params.para_id, |
| 159 | + OccupiedCoreAssumption::Included, |
| 160 | + ).await { |
| 161 | + Ok(None) => continue, |
| 162 | + Ok(Some(pvd)) => pvd.max_pov_size, |
| 163 | + Err(err) => { |
| 164 | + tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to gather information from relay-client"); |
| 165 | + continue; |
| 166 | + } |
| 167 | + }; |
| 168 | + |
| 169 | + let (slot_now, timestamp) = match consensus_common::relay_slot_and_timestamp( |
| 170 | + &relay_parent_header, |
| 171 | + params.relay_chain_slot_duration, |
| 172 | + ) { |
| 173 | + None => continue, |
| 174 | + Some((s, t)) => (Slot::from_timestamp(t, params.slot_duration), t), |
| 175 | + }; |
153 | 176 |
|
154 | 177 | let parent_search_params = ParentSearchParams { |
155 | 178 | relay_parent, |
@@ -183,32 +206,117 @@ pub async fn run<Block, P, BI, CIDP, Client, RClient, SO, Proposer, CS>( |
183 | 206 | Ok(x) => x, |
184 | 207 | }; |
185 | 208 |
|
186 | | - // Sort by depth, descending, to choose the longest chain, and lazily filter |
187 | | - // by those with space. |
188 | | - potential_parents.sort_by(|a, b| b.depth.cmp(&a.depth)); |
189 | | - let potential_parents = potential_parents |
190 | | - .into_iter() |
191 | | - .filter(|p| can_build_upon(p.hash, &*params.para_client)); |
192 | | - |
193 | | - if let Some(parent) = potential_parents.next() { |
194 | | - // TODO [now]: build and announce collations recursively until |
195 | | - // `can_build_upon` fails. |
196 | | - unimplemented!() |
| 209 | + let included_block = match potential_parents.iter().find(|x| x.depth == 0) { |
| 210 | + None => continue, // also serves as an `is_empty` check. |
| 211 | + Some(b) => b.hash, |
| 212 | + }; |
| 213 | + |
| 214 | + let para_client = &*params.para_client; |
| 215 | + let keystore = &params.keystore;
| 216 | + let can_build_upon = |block_hash| can_build_upon( |
| 217 | + slot_now, |
| 218 | + timestamp, |
| 219 | + block_hash, |
| 220 | + included_block, |
| 221 | + &para_client,
| 222 | + &keystore, |
| 223 | + ); |
| 224 | + |
| 225 | + // Sort by depth, ascending, to choose the longest chain. |
| 226 | + // |
| 227 | + // If the longest chain has space, build upon that. Otherwise, don't |
| 228 | + // build at all. |
| 229 | + potential_parents.sort_by_key(|a| &a.depth); |
| 230 | + let initial_parent = match potential_parents.pop() { |
| 231 | + None => continue, |
| 232 | + Some(p) => p, |
| 233 | + }; |
| 234 | + |
| 235 | + // Build in a loop until not allowed. Note that the authorities can change |
| 236 | + // at any block, so we need to re-claim our slot every time. |
| 237 | + let mut parent_hash = initial_parent.hash; |
| 238 | + let mut parent_header = initial_parent.header; |
| 239 | + loop { |
| 240 | + let slot_claim = match can_build_upon(parent_hash).await { |
| 241 | + None => break, |
| 242 | + Some(c) => c, |
| 243 | + }; |
| 244 | + |
| 245 | + let persisted_validation_data = PersistedValidationData { |
| 246 | + parent_head: parent_header.encode(), |
| 247 | + relay_parent_number: *relay_parent_header.number(), |
| 248 | + relay_parent_storage_root: *relay_parent_header.state_root(), |
| 249 | + max_pov_size, |
| 250 | + }; |
| 251 | + |
| 252 | + // Build and announce collations recursively until |
| 253 | + // `can_build_upon` fails or building a collation fails. |
| 254 | + let (parachain_inherent_data, other_inherent_data) = match collator.create_inherent_data( |
| 255 | + relay_parent, |
| 256 | + &persisted_validation_data, |
| 257 | + parent_hash, |
| 258 | + slot_claim.timestamp(), |
| 259 | + ).await { |
| 260 | + Err(err) => { |
| 261 | + tracing::error!(target: crate::LOG_TARGET, ?err); |
| 262 | + break; |
| 263 | + }, |
| 264 | + Ok(x) => x, |
| 265 | + }; |
| 266 | + |
| 267 | + let (new_block_hash, new_block_header) = match collator.collate( |
| 268 | + &parent_header, |
| 269 | + &slot_claim, |
| 270 | + None, |
| 271 | + (parachain_inherent_data, other_inherent_data), |
| 272 | + params.authoring_duration, |
| 273 | + // Set the block limit to 50% of the maximum PoV size. |
| 274 | + // |
| 275 | + // TODO: If we got benchmarking that includes the proof size, |
| 276 | + // we should be able to use the maximum pov size. |
| 277 | + (validation_data.max_pov_size / 2) as usize, |
| 278 | + ).await { |
| 279 | + Ok((collation, block_data, new_block_hash)) => { |
| 280 | + parent_hash = new_block_hash; |
| 281 | + parent_header = block_data.header; |
| 282 | + |
| 283 | + // TODO [now]: announce to parachain sub-network |
| 284 | + |
| 285 | + // TODO [link to github issue when i have internet]: |
| 286 | + // announce collation to relay-chain validators. |
| 287 | + } |
| 288 | + Err(err) => { |
| 289 | + tracing::error!(target: crate::LOG_TARGET, ?err); |
| 290 | + break; |
| 291 | + } |
| 292 | + }; |
197 | 293 | } |
198 | 294 | } |
199 | 295 | } |
200 | 296 |
|
201 | | -fn can_build_upon<Block: BlockT, Client>(block_hash: Block::Hash, client: &Client) -> bool |
| 297 | +// Checks if we own the slot at the given block and whether there |
| 298 | +// is space in the unincluded segment. |
| 299 | +async fn can_build_upon<Block: BlockT, Client, P>( |
| 300 | + slot: Slot, |
| 301 | + timestamp: Timestamp, |
| 302 | + block_hash: Block::Hash, |
| 303 | + included_block: Block::Hash, |
| 304 | + client: &Client, |
| 305 | + keystore: &KeystorePtr, |
| 306 | +) -> Option<SlotClaim<P::Public>> |
202 | 307 | where |
203 | 308 | Client: ProvideRuntimeApi<Block>, |
| 309 | + Client::Api: AuraApi<Block, P::Public>, |
| 310 | + P: Pair, |
| 311 | + P::Public: Encode + Decode, |
| 312 | + P::Signature: Encode + Decode, |
204 | 313 | { |
205 | | - // TODO [now]: claim slot, maybe with an authorities cache to avoid |
206 | | - // all validators doing this every new relay-chain block. |
207 | | - // Actually, as long as sessions are based on slot number then they should |
208 | | - // be the same for all... |
209 | | - // That is, blocks with the same relay-parent should have the same session. |
210 | | - // |
| 314 | + let authorities = client.runtime_api().authorities(block_hash).ok()?; |
| 315 | + let author_pub = aura_internal::claim_slot::<P>(slot, &authorities, keystore).await?; |
| 316 | + |
211 | 317 | // TODO [now]: new runtime API, |
212 | | - // AuraUnincludedSegmentApi::has_space(slot) or something like it. |
| 318 | + // AuraUnincludedSegmentApi::has_space(included_block, slot) or something like it. |
213 | 319 | unimplemented!() |
| 320 | + |
| 321 | + Some(SlotClaim::unchecked(author_pub, slot, timestamp)) |
214 | 322 | } |
0 commit comments