diff --git a/Cargo.toml b/Cargo.toml
index 3f373fdace9cbf..4bd06dbff91963 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -49,3 +49,34 @@ overflow-checks = true
 debug = true
 # Use ThinLTO. Much smaller output for a small amount of build time increase.
 lto = "thin"
+
+[workspace.lints.clippy]
+correctness = { level = "forbid", priority = 1 }
+suspicious = { level = "forbid", priority = 1 }
+perf = { level = "forbid", priority = 1 }
+
+complexity = { level = "deny", priority = -1 }
+
+style = { level = "allow", priority = -1 }
+pedantic = { level = "allow", priority = -1 }
+restriction = { level = "allow", priority = -1 }
+
+too_many_arguments = "allow"
+identity_op = "allow"
+
+ptr_arg = "forbid"
+ref_option = "forbid"
+inefficient_to_string = "forbid"
+from_over_into = "forbid"
+unwrap_or_default = "forbid"
+redundant_static_lifetimes = "forbid"
+match_like_matches_macro = "forbid"
+field_reassign_with_default = "forbid"
+inherent_to_string = "forbid"
+owned_cow = "forbid"
+redundant_clone = "forbid"
+str_to_string = "forbid"
+single_char_pattern = "forbid"
+single_char_add_str = "forbid"
+unnecessary_owned_empty_strings = "forbid"
+manual_string_new = "forbid"
diff --git a/yjit/Cargo.toml b/yjit/Cargo.toml
index ad7dd35ecfb173..7fd8c44a6bc45b 100644
--- a/yjit/Cargo.toml
+++ b/yjit/Cargo.toml
@@ -24,3 +24,6 @@ disasm = ["capstone"]
 # from cfg!(debug_assertions) so that we can see disasm of the code
 # that would run in the release mode.
 runtime_checks = []
+
+[lints]
+workspace = true
diff --git a/yjit/src/asm/arm64/opnd.rs b/yjit/src/asm/arm64/opnd.rs
index 108824e08dee65..b992ebfe97e6d4 100644
--- a/yjit/src/asm/arm64/opnd.rs
+++ b/yjit/src/asm/arm64/opnd.rs
@@ -79,10 +79,7 @@ impl A64Opnd {
 
     /// Convenience function to check if this operand is a register.
     pub fn is_reg(&self) -> bool {
-        match self {
-            A64Opnd::Reg(_) => true,
-            _ => false
-        }
+        matches!(self, A64Opnd::Reg(_))
     }
 
     /// Unwrap a register from an operand.
diff --git a/yjit/src/asm/mod.rs b/yjit/src/asm/mod.rs
index ebdc205da96518..54445b4d880b64 100644
--- a/yjit/src/asm/mod.rs
+++ b/yjit/src/asm/mod.rs
@@ -224,16 +224,16 @@ impl CodeBlock {
     }
 
     /// Free the memory pages of given code page indexes
-    fn free_pages(&mut self, page_idxs: &Vec<usize>) {
-        let mut page_idxs = page_idxs.clone();
-        page_idxs.reverse(); // to loop with pop()
+    /// In Rust >= 1.77 this could probably be simplified with chunk_by.
+    fn free_pages(&mut self, page_idxs: &[usize]) {
+        let mut page_idxs = page_idxs.iter().rev().collect::<Vec<_>>();
 
         // Group adjacent page indexes and free them in batches to reduce the # of syscalls.
-        while let Some(page_idx) = page_idxs.pop() {
+        while let Some(&page_idx) = page_idxs.pop() {
             // Group first adjacent page indexes
             let mut batch_idxs = vec![page_idx];
-            while page_idxs.last() == Some(&(batch_idxs.last().unwrap() + 1)) {
-                batch_idxs.push(page_idxs.pop().unwrap());
+            while page_idxs.last() == Some(&&(*batch_idxs.last().unwrap() + 1)) {
+                batch_idxs.push(*page_idxs.pop().unwrap());
             }
 
             // Free the grouped pages at once
@@ -378,7 +378,7 @@ impl CodeBlock {
         // Unless this comment is the same as the last one at this same line, add it.
         if this_line_comments.last().map(String::as_str) != Some(comment) {
-            this_line_comments.push(comment.to_string());
+            this_line_comments.push(comment.into());
         }
     }
 
@@ -441,12 +441,12 @@ impl CodeBlock {
         // Ignore empty code ranges
         if start_addr == end_addr {
-            return (0..0).into_iter();
+            return 0..0;
         }
 
         let start_page = (start_addr.raw_addr(self) - mem_start) / self.page_size;
         let end_page = (end_addr.raw_addr(self) - mem_start - 1) / self.page_size;
-        (start_page..end_page + 1).into_iter()
+        start_page..end_page + 1
     }
 
     /// Get a (possibly dangling) direct pointer to the current write position
diff --git a/yjit/src/asm/x86_64/mod.rs b/yjit/src/asm/x86_64/mod.rs
index 0ef5e92117377e..7541472afa3b53 100644
--- a/yjit/src/asm/x86_64/mod.rs
+++ b/yjit/src/asm/x86_64/mod.rs
@@ -163,10 +163,7 @@ impl X86Opnd {
     }
 
     pub fn is_some(&self) -> bool {
-        match self {
-            X86Opnd::None => false,
-            _ => true
-        }
+        !matches!(self, X86Opnd::None)
     }
 }
diff --git a/yjit/src/asm/x86_64/tests.rs b/yjit/src/asm/x86_64/tests.rs
index eefcbfd52e130b..a800e9a586d9d2 100644
--- a/yjit/src/asm/x86_64/tests.rs
+++ b/yjit/src/asm/x86_64/tests.rs
@@ -454,7 +454,7 @@ fn block_comments() {
     let third_write_ptr = cb.get_write_ptr().raw_addr(&cb);
     cb.add_comment("Ten bytes in");
 
-    assert_eq!(&vec!( "Beginning".to_string() ), cb.comments_at(first_write_ptr).unwrap());
-    assert_eq!(&vec!( "Two bytes in".to_string(), "Still two bytes in".to_string() ), cb.comments_at(second_write_ptr).unwrap());
-    assert_eq!(&vec!( "Ten bytes in".to_string() ), cb.comments_at(third_write_ptr).unwrap());
+    assert_eq!(&vec!( "Beginning".to_owned() ), cb.comments_at(first_write_ptr).unwrap());
+    assert_eq!(&vec!( "Two bytes in".to_owned(), "Still two bytes in".to_owned() ), cb.comments_at(second_write_ptr).unwrap());
+    assert_eq!(&vec!( "Ten bytes in".to_owned() ), cb.comments_at(third_write_ptr).unwrap());
 }
diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs
index 66e333f867d451..9ea219c8c09ee8 100644
--- a/yjit/src/backend/arm64/mod.rs
+++ b/yjit/src/backend/arm64/mod.rs
@@ -1243,7 +1243,7 @@ impl Assembler
                 emit_cmp_zero_jump(cb, opnd.into(), false, compile_side_exit(*target, self, ocb)?);
             },
             Insn::IncrCounter { mem, value } => {
-                let label = cb.new_label("incr_counter_loop".to_string());
+                let label = cb.new_label("incr_counter_loop".into());
                 cb.write_label(label);
 
                 ldaxr(cb, Self::SCRATCH0, mem.into());
diff --git a/yjit/src/backend/ir.rs b/yjit/src/backend/ir.rs
index 1df151433a8025..06c067b1f00be5 100644
--- a/yjit/src/backend/ir.rs
+++ b/yjit/src/backend/ir.rs
@@ -186,7 +186,7 @@ impl Opnd
     /// Maps the indices from a previous list of instructions to a new list of
     /// instructions.
-    pub fn map_index(self, indices: &Vec<usize>) -> Opnd {
+    pub fn map_index(self, indices: &[usize]) -> Opnd {
         match self {
             Opnd::InsnOut { idx, num_bits } => {
                 Opnd::InsnOut { idx: indices[idx], num_bits }
@@ -1186,7 +1186,7 @@ impl Assembler
         assert!(!name.contains(' '), "use underscores in label names, not spaces");
 
         let label_idx = self.label_names.len();
-        self.label_names.push(name.to_string());
+        self.label_names.push(name.into());
         Target::Label(label_idx)
     }
 
@@ -1266,11 +1266,11 @@ impl Assembler
 
     /// Spill all live registers to the stack
     pub fn spill_regs(&mut self) {
-        self.spill_regs_except(&vec![]);
+        self.spill_regs_except(&[]);
     }
 
     /// Spill all live registers except `ignored_temps` to the stack
-    pub fn spill_regs_except(&mut self, ignored_temps: &Vec<RegOpnd>) {
+    pub fn spill_regs_except(&mut self, ignored_temps: &[RegOpnd]) {
         // Forget registers above the stack top
         let mut reg_mapping = self.ctx.get_reg_mapping();
         for stack_idx in self.ctx.get_stack_size()..MAX_CTX_TEMPS as u8 {
@@ -1348,17 +1348,17 @@ impl Assembler
 
     // Shuffle register moves, sometimes adding extra moves using SCRATCH_REG,
     // so that they will not rewrite each other before they are used.
-    pub fn reorder_reg_moves(old_moves: &Vec<(Reg, Opnd)>) -> Vec<(Reg, Opnd)> {
+    pub fn reorder_reg_moves(old_moves: &[(Reg, Opnd)]) -> Vec<(Reg, Opnd)> {
         // Return the index of a move whose destination is not used as a source if any.
-        fn find_safe_move(moves: &Vec<(Reg, Opnd)>) -> Option<usize> {
+        fn find_safe_move(moves: &[(Reg, Opnd)]) -> Option<usize> {
             moves.iter().enumerate().find(|(_, &(dest_reg, _))| {
                 moves.iter().all(|&(_, src_opnd)| src_opnd != Opnd::Reg(dest_reg))
             }).map(|(index, _)| index)
         }
 
         // Remove moves whose source and destination are the same
-        let mut old_moves: Vec<(Reg, Opnd)> = old_moves.clone().into_iter()
-            .filter(|&(reg, opnd)| Opnd::Reg(reg) != opnd).collect();
+        let mut old_moves: Vec<(Reg, Opnd)> = old_moves.into_iter()
+            .filter(|&&(reg, opnd)| Opnd::Reg(reg) != opnd).copied().collect();
 
         let mut new_moves = vec![];
         while old_moves.len() > 0 {
@@ -1385,6 +1385,7 @@ impl Assembler
     /// Sets the out field on the various instructions that require allocated
     /// registers because their output is used as the operand on a subsequent
     /// instruction. This is our implementation of the linear scan algorithm.
+    #[allow(clippy::useless_conversion)]
     pub(super) fn alloc_regs(mut self, regs: Vec<Reg>) -> Assembler {
         //dbg!(&self);
@@ -1394,7 +1395,7 @@ impl Assembler
 
         // Mutate the pool bitmap to indicate that the register at that index
        // has been allocated and is live.
-        fn alloc_reg(pool: &mut u32, regs: &Vec<Reg>) -> Option<Reg> {
+        fn alloc_reg(pool: &mut u32, regs: &[Reg]) -> Option<Reg> {
             for (index, reg) in regs.iter().enumerate() {
                 if (*pool & (1 << index)) == 0 {
                     *pool |= 1 << index;
@@ -1405,7 +1406,7 @@ impl Assembler
         }
 
         // Allocate a specific register
-        fn take_reg(pool: &mut u32, regs: &Vec<Reg>, reg: &Reg) -> Reg {
+        fn take_reg(pool: &mut u32, regs: &[Reg], reg: &Reg) -> Reg {
             let reg_index = regs.iter().position(|elem| elem.reg_no == reg.reg_no);
 
             if let Some(reg_index) = reg_index {
@@ -1419,7 +1420,7 @@ impl Assembler
 
         // Mutate the pool bitmap to indicate that the given register is being
         // returned as it is no longer used by the instruction that previously
        // held it.
-        fn dealloc_reg(pool: &mut u32, regs: &Vec<Reg>, reg: &Reg) {
+        fn dealloc_reg(pool: &mut u32, regs: &[Reg], reg: &Reg) {
             let reg_index = regs.iter().position(|elem| elem.reg_no == reg.reg_no);
 
             if let Some(reg_index) = reg_index {
@@ -1602,7 +1603,7 @@ impl Assembler
             if c_args.len() > 0 {
                 // Resolve C argument dependencies
                 let c_args_len = c_args.len() as isize;
-                let moves = Self::reorder_reg_moves(&c_args.drain(..).into_iter().collect());
+                let moves = Self::reorder_reg_moves(&c_args.drain(..).into_iter().collect::<Vec<_>>());
                 shift_live_ranges(&mut shifted_live_ranges, asm.insns.len(), moves.len() as isize - c_args_len);
 
                 // Push batched C arguments
@@ -1751,7 +1752,7 @@ impl Assembler {
     }
 
     pub fn bake_string(&mut self, text: &str) {
-        self.push_insn(Insn::BakeString(text.to_string()));
+        self.push_insn(Insn::BakeString(text.into()));
     }
 
     #[allow(dead_code)]
@@ -1791,7 +1792,7 @@ impl Assembler {
     }
 
     /// Let vm_check_canary() assert the leafness of this ccall if leaf_ccall is set
-    fn set_stack_canary(&mut self, opnds: &Vec<Opnd>) -> Option<Opnd> {
+    fn set_stack_canary(&mut self, opnds: &[Opnd]) -> Option<Opnd> {
         // Use the slot right above the stack top for verifying leafness.
         let canary_opnd = self.stack_opnd(-1);
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index b8b15adc8b44a8..ad5fae13fd40a3 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -43,6 +43,7 @@ type InsnGenFn = fn(
 
 /// Ephemeral code generation state.
 /// Represents a [crate::core::Block] while we build it.
+#[allow(clippy::type_complexity)]
 pub struct JITState<'a> {
     /// Instruction sequence for the compiling block
     pub iseq: IseqPtr,
@@ -403,7 +404,7 @@ impl<'a> JITState<'a> {
         if !self.perf_stack.is_empty() {
             self.perf_symbol_range_end(asm);
         }
-        self.perf_stack.push(symbol_name.to_string());
+        self.perf_stack.push(symbol_name.into());
         self.perf_symbol_range_start(asm, symbol_name);
     }
 
@@ -453,12 +454,9 @@ impl<'a> JITState<'a> {
 
     /// Return true if we're compiling a send-like instruction, not an opt_* instruction.
     pub fn is_sendish(&self) -> bool {
-        match unsafe { rb_iseq_opcode_at_pc(self.iseq, self.pc) } as u32 {
-            YARVINSN_send |
+        matches!(unsafe { rb_iseq_opcode_at_pc(self.iseq, self.pc) } as u32, YARVINSN_send |
             YARVINSN_opt_send_without_block |
-            YARVINSN_invokesuper => true,
-            _ => false,
-        }
+            YARVINSN_invokesuper)
     }
 
     /// Return the number of locals in the current ISEQ
@@ -1178,14 +1176,14 @@ pub fn gen_entry_reg_mapping(asm: &mut Assembler, blockid: BlockId, stack_size:
     // Find an existing callee block. If it's not found or uses no register, skip loading registers.
     let mut ctx = Context::default();
     ctx.set_stack_size(stack_size);
-    let reg_mapping = find_most_compatible_reg_mapping(blockid, &ctx).unwrap_or(RegMapping::default());
+    let reg_mapping = find_most_compatible_reg_mapping(blockid, &ctx).unwrap_or_default();
     if reg_mapping == RegMapping::default() {
         return reg_mapping;
     }
 
     // If found, load the same registers to reuse the block.
     asm_comment!(asm, "reuse maps: {:?}", reg_mapping);
-    let local_table_size: u32 = unsafe { get_iseq_body_local_table_size(blockid.iseq) }.try_into().unwrap();
+    let local_table_size: u32 = unsafe { get_iseq_body_local_table_size(blockid.iseq) };
     for &reg_opnd in reg_mapping.get_reg_opnds().iter() {
         match reg_opnd {
             RegOpnd::Local(local_idx) => {
@@ -1299,7 +1297,7 @@ pub fn gen_single_block(
     #[cfg(feature = "disasm")]
     if get_option_ref!(dump_disasm).is_some() {
         let blockid_idx = blockid.idx;
-        let chain_depth = if asm.ctx.get_chain_depth() > 0 { format!("(chain_depth: {})", asm.ctx.get_chain_depth()) } else { "".to_string() };
+        let chain_depth = if asm.ctx.get_chain_depth() > 0 { format!("(chain_depth: {})", asm.ctx.get_chain_depth()) } else { String::new() };
         asm_comment!(asm, "Block: {} {}", iseq_get_location(blockid.iseq, blockid_idx), chain_depth);
         asm_comment!(asm, "reg_mapping: {:?}", asm.ctx.get_reg_mapping());
     }
@@ -7708,7 +7706,7 @@ fn gen_send_iseq(
     // runtime guards later in copy_splat_args_for_rest_callee()
     if !iseq_has_rest {
         let supplying = argc - 1 - i32::from(kw_splat) + array_length as i32;
-        if (required_num..=required_num + opt_num).contains(&supplying) == false {
+        if !(required_num..=required_num + opt_num).contains(&supplying) {
             gen_counter_incr(jit, asm, Counter::send_iseq_splat_arity_error);
             return None;
         }
@@ -9546,7 +9544,7 @@ fn get_class_name(class: Option<VALUE>) -> String {
         unsafe { RB_TYPE_P(class, RUBY_T_MODULE) || RB_TYPE_P(class, RUBY_T_CLASS) }
     }).and_then(|class| unsafe {
         cstr_to_rust_string(rb_class2name(class))
-    }).unwrap_or_else(|| "Unknown".to_string())
+    }).unwrap_or_else(|| "Unknown".into())
 }
 
 /// Assemble "{class_name}#{method_name}" from a class pointer and a method ID
@@ -9556,7 +9554,7 @@ fn get_method_name(class: Option<VALUE>, mid: u64) -> String {
         unsafe { cstr_to_rust_string(rb_id2name(mid)) }
     } else {
         None
-    }.unwrap_or_else(|| "Unknown".to_string());
+    }.unwrap_or_else(|| "Unknown".into());
     format!("{}#{}", class_name, method_name)
 }
 
@@ -9564,7 +9562,7 @@ fn get_iseq_name(iseq: IseqPtr) -> String {
     let c_string = unsafe { rb_yjit_iseq_inspect(iseq) };
     let string = unsafe { CStr::from_ptr(c_string) }.to_str()
-        .unwrap_or_else(|_| "not UTF-8").to_string();
+        .unwrap_or_else(|_| "not UTF-8").into();
     unsafe { ruby_xfree(c_string as *mut c_void); }
     string
 }
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index d42726bcc77691..1031bce0c21dab 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -121,24 +121,12 @@ impl Type {
     /// Check if the type is an immediate
     pub fn is_imm(&self) -> bool {
-        match self {
-            Type::UnknownImm => true,
-            Type::Nil => true,
-            Type::True => true,
-            Type::False => true,
-            Type::Fixnum => true,
-            Type::Flonum => true,
-            Type::ImmSymbol => true,
-            _ => false,
-        }
+        matches!(self, Type::UnknownImm | Type::Nil | Type::True | Type::False | Type::Fixnum | Type::Flonum | Type::ImmSymbol)
     }
 
     /// Returns true when the type is not specific.
     pub fn is_unknown(&self) -> bool {
-        match self {
-            Type::Unknown | Type::UnknownImm | Type::UnknownHeap => true,
-            _ => false,
-        }
+        matches!(self, Type::Unknown | Type::UnknownImm | Type::UnknownHeap)
     }
 
     /// Returns true when we know the VALUE is a specific handle type,
@@ -150,17 +138,7 @@ impl Type {
     /// Check if the type is a heap object
     pub fn is_heap(&self) -> bool {
-        match self {
-            Type::UnknownHeap => true,
-            Type::TArray => true,
-            Type::CArray => true,
-            Type::THash => true,
-            Type::CHash => true,
-            Type::TString => true,
-            Type::CString => true,
-            Type::BlockParamProxy => true,
-            _ => false,
-        }
+        matches!(self, Type::UnknownHeap | Type::TArray | Type::CArray | Type::THash | Type::CHash | Type::TString | Type::CString | Type::BlockParamProxy)
     }
 
     /// Check if it's a T_ARRAY object (both TArray and CArray are T_ARRAY)
@@ -797,8 +775,10 @@ mod bitvector_tests {
         let ctx0 = Context::default();
         let idx0 = ctx0.encode_into(&mut bits);
 
-        let mut ctx1 = Context::default();
-        ctx1.reg_mapping = RegMapping([Some(RegOpnd::Stack(0)), None, None, None, None]);
+        let ctx1 = Context {
+            reg_mapping: RegMapping([Some(RegOpnd::Stack(0)), None, None, None, None]),
+            ..Default::default()
+        };
         let idx1 = ctx1.encode_into(&mut bits);
 
         // Make sure that we can encode two contexts successively
@@ -811,8 +791,10 @@ mod bitvector_tests {
     #[test]
     fn regress_reg_mapping() {
         let mut bits = BitVector::new();
-        let mut ctx = Context::default();
-        ctx.reg_mapping = RegMapping([Some(RegOpnd::Stack(0)), None, None, None, None]);
+        let ctx = Context {
+            reg_mapping: RegMapping([Some(RegOpnd::Stack(0)), None, None, None, None]),
+            ..Default::default()
+        };
         ctx.encode_into(&mut bits);
 
         let b0 = bits.read_u1(&mut 0);
@@ -1099,7 +1081,7 @@ impl Context {
                 MapToLocal(local_idx) => {
                     bits.push_op(CtxOp::MapTempLocal);
                     bits.push_u3(stack_idx as u8);
-                    bits.push_u3(local_idx as u8);
+                    bits.push_u3(local_idx);
                 }
 
                 MapToSelf => {
@@ -1170,19 +1152,19 @@ impl Context {
             match op {
                 CtxOp::SetSelfType => {
-                    ctx.self_type = unsafe { transmute(bits.read_u4(&mut idx)) };
+                    ctx.self_type = unsafe { transmute::<u8, Type>(bits.read_u4(&mut idx)) };
                 }
 
                 CtxOp::SetLocalType => {
                     let local_idx = bits.read_u3(&mut idx) as usize;
-                    let t = unsafe { transmute(bits.read_u4(&mut idx)) };
+                    let t = unsafe { transmute::<u8, Type>(bits.read_u4(&mut idx)) };
                     ctx.set_local_type(local_idx, t);
                 }
 
                 // Map temp to stack (known type)
                 CtxOp::SetTempType => {
                     let temp_idx = bits.read_u3(&mut idx) as usize;
-                    let temp_type = unsafe { transmute(bits.read_u4(&mut idx)) };
+                    let temp_type = unsafe { transmute::<u8, Type>(bits.read_u4(&mut idx)) };
                     ctx.set_temp_mapping(temp_idx, TempMapping::MapToStack(temp_type));
                 }
 
@@ -2294,7 +2276,7 @@ pub fn limit_block_versions(blockid: BlockId, ctx: &Context) -> Context {
     let generic_ctx = ctx.get_generic_ctx();
 
     if cfg!(debug_assertions) {
-        let mut ctx = ctx.clone();
+        let mut ctx = *ctx;
         if ctx.inline() {
             // Suppress TypeDiff::Incompatible from ctx.diff(). We return TypeDiff::Incompatible
             // to keep inlining blocks until we hit the limit, but it's safe to give up inlining.
@@ -2510,10 +2492,12 @@ impl Context {
     /// Create a new Context that is compatible with self but doesn't have type information.
     pub fn get_generic_ctx(&self) -> Context {
-        let mut generic_ctx = Context::default();
-        generic_ctx.stack_size = self.stack_size;
-        generic_ctx.sp_offset = self.sp_offset;
-        generic_ctx.reg_mapping = self.reg_mapping;
+        let mut generic_ctx = Context {
+            stack_size: self.stack_size,
+            sp_offset: self.sp_offset,
+            reg_mapping: self.reg_mapping,
+            ..Default::default()
+        };
         if self.is_return_landing() {
             generic_ctx.set_as_return_landing();
         }
@@ -2920,7 +2904,7 @@ impl Context {
         }
 
         // Prepare a Context with the same registers
-        let mut dst_with_same_regs = dst.clone();
+        let mut dst_with_same_regs = *dst;
         dst_with_same_regs.set_reg_mapping(self.get_reg_mapping());
 
         // Diff registers and other stuff separately, and merge them
@@ -3233,9 +3217,7 @@ fn gen_entry_point_body(blockid: BlockId, stack_size: u8, ec: EcPtr, jit_excepti
     let (code_ptr, reg_mapping) = gen_entry_prologue(cb, ocb, blockid, stack_size, jit_exception)?;
 
     // Find or compile a block version
-    let mut ctx = Context::default();
-    ctx.stack_size = stack_size;
-    ctx.reg_mapping = reg_mapping;
+    let ctx = Context { stack_size, reg_mapping, ..Default::default()};
     let block = match find_block_version(blockid, &ctx) {
         // If an existing block is found, generate a jump to the block.
         Some(blockref) => {
@@ -3359,9 +3341,7 @@ fn entry_stub_hit_body(
     asm.compile(cb, Some(ocb))?;
 
     // Find or compile a block version
-    let mut ctx = Context::default();
-    ctx.stack_size = stack_size;
-    ctx.reg_mapping = reg_mapping;
+    let ctx = Context { stack_size, reg_mapping, ..Default::default()};
     let blockref = match find_block_version(blockid, &ctx) {
         // If an existing block is found, generate a jump to the block.
         Some(blockref) => {
@@ -3823,7 +3803,7 @@ pub fn gen_branch_stub_hit_trampoline(ocb: &mut OutlinedCb) -> Option<CodePtr> {
 }
 
 /// Return registers to be pushed and popped on branch_stub_hit.
-pub fn caller_saved_temp_regs() -> impl Iterator<Item = Reg> + DoubleEndedIterator {
+pub fn caller_saved_temp_regs() -> impl DoubleEndedIterator<Item = Reg> {
     let temp_regs = Assembler::get_temp_regs().iter();
     let len = temp_regs.len();
     // The return value gen_leave() leaves in C_RET_REG
diff --git a/yjit/src/cruby.rs b/yjit/src/cruby.rs
index f7a08b3b18b537..043b258bb1ef4e 100644
--- a/yjit/src/cruby.rs
+++ b/yjit/src/cruby.rs
@@ -97,7 +97,7 @@ pub type size_t = u64;
 pub type RedefinitionFlag = u32;
 
 #[allow(dead_code)]
-#[allow(clippy::all)]
+#[allow(clippy::complexity)]
 mod autogened {
     use super::*;
     // Textually include output from rust-bindgen as suggested by its user guide.
@@ -223,7 +223,7 @@ pub fn insn_name(opcode: usize) -> String {
         let op_name = CStr::from_ptr(op_name).to_str().unwrap();
 
         // Convert into an owned string
-        op_name.to_string()
+        op_name.into()
     }
 }
 
@@ -619,7 +619,7 @@ pub fn cstr_to_rust_string(c_char_ptr: *const c_char) -> Option<String> {
     let c_str: &CStr = unsafe { CStr::from_ptr(c_char_ptr) };
 
     match c_str.to_str() {
-        Ok(rust_str) => Some(rust_str.to_string()),
+        Ok(rust_str) => Some(rust_str.into()),
         Err(_) => None
     }
 }
diff --git a/yjit/src/disasm.rs b/yjit/src/disasm.rs
index 4f85937ee9f0b7..13e7013c2acddb 100644
--- a/yjit/src/disasm.rs
+++ b/yjit/src/disasm.rs
@@ -69,7 +69,7 @@ pub extern "C" fn rb_yjit_disasm_iseq(_ec: EcPtr, _ruby_self: VALUE, iseqw: VALU
 
 /// Only call while holding the VM lock.
 #[cfg(feature = "disasm")]
 pub fn disasm_iseq_insn_range(iseq: IseqPtr, start_idx: u16, end_idx: u16) -> String {
-    let mut out = String::from("");
+    let mut out = String::new();
 
     // Get a list of block versions generated for this iseq
     let block_list = get_or_create_iseq_block_list(iseq);
@@ -161,7 +161,7 @@ pub fn dump_disasm_addr_range(cb: &CodeBlock, start_addr: CodePtr, end_addr: Cod
 
 #[cfg(feature = "disasm")]
 pub fn disasm_addr_range(cb: &CodeBlock, start_addr: usize, end_addr: usize) -> String {
-    let mut out = String::from("");
+    let mut out = String::new();
 
     // Initialize capstone
     use capstone::prelude::*;
diff --git a/yjit/src/lib.rs b/yjit/src/lib.rs
index f3247fbf1aab71..3523240f494ab8 100644
--- a/yjit/src/lib.rs
+++ b/yjit/src/lib.rs
@@ -1,8 +1,3 @@
-// Clippy disagreements
-#![allow(clippy::style)] // We are laid back about style
-#![allow(clippy::too_many_arguments)] // :shrug:
-#![allow(clippy::identity_op)] // Sometimes we do it for style
-
 // TODO(alan): This lint is right -- the way we use `static mut` is UB happy. We have many globals
 // and take `&mut` frequently, sometimes with a method that easily allows calling it twice.
 //
diff --git a/yjit/src/options.rs b/yjit/src/options.rs
index c87a436091279f..6d9e9fccbc63d5 100644
--- a/yjit/src/options.rs
+++ b/yjit/src/options.rs
@@ -115,7 +115,7 @@ pub static mut OPTIONS: Options = Options {
 
 /// YJIT option descriptions for `ruby --help`.
 /// Note that --help allows only 80 characters per line, including indentation. 80-character limit --> |
-pub const YJIT_OPTIONS: &'static [(&str, &str)] = &[
+pub const YJIT_OPTIONS: &[(&str, &str)] = &[
     ("--yjit-mem-size=num", "Soft limit on YJIT memory usage in MiB (default: 128)."),
     ("--yjit-exec-mem-size=num", "Hard limit on executable memory block in MiB."),
     ("--yjit-call-threshold=num", "Number of calls to trigger JIT."),
@@ -319,7 +319,7 @@ pub fn parse_option(str_ptr: *const std::os::raw::c_char) -> Option<()> {
                 eprintln!("WARNING: the {} option is only available when YJIT is built in dev mode, i.e. ./configure --enable-yjit=dev", opt_name);
             }
 
-            OPTIONS.dump_iseq_disasm = Some(opt_val.to_string());
+            OPTIONS.dump_iseq_disasm = Some(opt_val.into());
         },
 
         ("no-type-prop", "") => unsafe { OPTIONS.no_type_prop = true },
@@ -346,7 +346,7 @@ pub fn parse_option(str_ptr: *const std::os::raw::c_char) -> Option<()> {
             let log_file_path = if std::path::Path::new(arg_value).is_dir() {
                 format!("{arg_value}/yjit_{}.log", std::process::id())
             } else {
-                arg_value.to_string()
+                arg_value.into()
             };
 
             match File::options().create(true).write(true).truncate(true).open(&log_file_path) {
diff --git a/yjit/src/stats.rs b/yjit/src/stats.rs
index ba84b7a5494e73..c070180da51a92 100644
--- a/yjit/src/stats.rs
+++ b/yjit/src/stats.rs
@@ -95,7 +95,7 @@ fn get_method_idx(
         Some(idx) => *idx,
         None => {
             let idx = name_to_idx.len();
-            name_to_idx.insert(name.to_string(), idx);
+            name_to_idx.insert(name.into(), idx);
 
             // Resize the call count vector
             if idx >= call_count.len() {
@@ -269,7 +269,7 @@ macro_rules! make_counters {
 
 /// The list of counters that are available without --yjit-stats.
 /// They are incremented only by `incr_counter!` and don't use `gen_counter_incr`.
-pub const DEFAULT_COUNTERS: &'static [Counter] = &[
+pub const DEFAULT_COUNTERS: &[Counter] = &[
     Counter::code_gc_count,
     Counter::compiled_iseq_entry,
     Counter::cold_iseq_entry,
diff --git a/yjit/src/utils.rs b/yjit/src/utils.rs
index 8c4133546d54c8..c274adfdfee662 100644
--- a/yjit/src/utils.rs
+++ b/yjit/src/utils.rs
@@ -92,10 +92,7 @@ pub fn ruby_str_to_rust(v: VALUE) -> String {
     let str_ptr = unsafe { rb_RSTRING_PTR(v) } as *mut u8;
     let str_len: usize = unsafe { rb_RSTRING_LEN(v) }.try_into().unwrap();
     let str_slice: &[u8] = unsafe { slice::from_raw_parts(str_ptr, str_len) };
-    match String::from_utf8(str_slice.to_vec()) {
-        Ok(utf8) => utf8,
-        Err(_) => String::new(),
-    }
+    String::from_utf8(str_slice.to_vec()).unwrap_or_default()
 }
 
 // Location is the file defining the method, colon, method name.
@@ -107,17 +104,17 @@ pub fn iseq_get_location(iseq: IseqPtr, pos: u16) -> String {
     let iseq_lineno = unsafe { rb_iseq_line_no(iseq, pos as usize) };
 
     let mut s = if iseq_label == Qnil {
-        "None".to_string()
+        "None".into()
     } else {
         ruby_str_to_rust(iseq_label)
     };
-    s.push_str("@");
+    s.push('@');
     if iseq_path == Qnil {
         s.push_str("None");
     } else {
         s.push_str(&ruby_str_to_rust(iseq_path));
     }
-    s.push_str(":");
+    s.push(':');
     s.push_str(&iseq_lineno.to_string());
     s
 }
diff --git a/zjit/Cargo.toml b/zjit/Cargo.toml
index a1da8e7cc0990a..cd33311a5b9821 100644
--- a/zjit/Cargo.toml
+++ b/zjit/Cargo.toml
@@ -19,3 +19,6 @@ expect-test = "1.5.1"
 # Support --yjit-dump-disasm and RubyVM::YJIT.disasm using libcapstone.
 disasm = ["capstone"]
 runtime_checks = []
+
+[lints]
+workspace = true
diff --git a/zjit/build.rs b/zjit/build.rs
index 6aec5407f62af7..a9e81bb7c1f00a 100644
--- a/zjit/build.rs
+++ b/zjit/build.rs
@@ -13,7 +13,7 @@ fn main() {
     // ordered after -lminiruby above.
     let link_flags = env::var("RUBY_LD_FLAGS").unwrap();
 
-    let mut split_iter = link_flags.split(" ");
+    let mut split_iter = link_flags.split(' ');
     while let Some(token) = split_iter.next() {
         if token == "-framework" {
             if let Some(framework) = split_iter.next() {
diff --git a/zjit/src/asm/arm64/opnd.rs b/zjit/src/asm/arm64/opnd.rs
index a77958f7e6eeec..8246bea08e69ee 100644
--- a/zjit/src/asm/arm64/opnd.rs
+++ b/zjit/src/asm/arm64/opnd.rs
@@ -79,10 +79,7 @@ impl A64Opnd {
 
     /// Convenience function to check if this operand is a register.
     pub fn is_reg(&self) -> bool {
-        match self {
-            A64Opnd::Reg(_) => true,
-            _ => false
-        }
+        matches!(self, A64Opnd::Reg(_))
     }
 
     /// Unwrap a register from an operand.
diff --git a/zjit/src/asm/mod.rs b/zjit/src/asm/mod.rs
index 9bf11dfc4afdee..81d3b2283bd9b6 100644
--- a/zjit/src/asm/mod.rs
+++ b/zjit/src/asm/mod.rs
@@ -97,7 +97,7 @@ impl CodeBlock {
 
         // Unless this comment is the same as the last one at this same line, add it.
         if this_line_comments.last().map(String::as_str) != Some(comment) {
-            this_line_comments.push(comment.to_string());
+            this_line_comments.push(comment.into());
         }
     }
 
diff --git a/zjit/src/backend/arm64/mod.rs b/zjit/src/backend/arm64/mod.rs
index 0c7e6883c2759c..bac523e751e0fb 100644
--- a/zjit/src/backend/arm64/mod.rs
+++ b/zjit/src/backend/arm64/mod.rs
@@ -139,50 +139,55 @@ fn emit_load_value(cb: &mut CodeBlock, rd: A64Opnd, value: u64) -> usize {
         // If the value fits into a single movz
         // instruction, then we'll use that.
         movz(cb, rd, A64Opnd::new_uimm(current), 0);
-        return 1;
-    } else if u16::try_from(!value).is_ok() {
+        return 1
+    }
+
+    if u16::try_from(!value).is_ok() {
         // For small negative values, use a single movn
         movn(cb, rd, A64Opnd::new_uimm(!value), 0);
         return 1;
-    } else if BitmaskImmediate::try_from(current).is_ok() {
+    }
+
+    if BitmaskImmediate::try_from(current).is_ok() {
         // Otherwise, if the immediate can be encoded
         // with the special bitmask immediate encoding,
         // we'll use that.
         mov(cb, rd, A64Opnd::new_uimm(current));
         return 1;
-    } else {
-        // Finally we'll fall back to encoding the value
-        // using movz for the first 16 bits and movk for
-        // each subsequent set of 16 bits as long as
-        // they are necessary.
-        movz(cb, rd, A64Opnd::new_uimm(current & 0xffff), 0);
-        let mut num_insns = 1;
-
-        // (We're sure this is necessary since we
-        // checked if it only fit into movz above).
+    }
+
+    // Finally we'll fall back to encoding the value
+    // using movz for the first 16 bits and movk for
+    // each subsequent set of 16 bits as long as
+    // they are necessary.
+    movz(cb, rd, A64Opnd::new_uimm(current & 0xffff), 0);
+    let mut num_insns = 1;
+
+    // (We're sure this is necessary since we
+    // checked if it only fit into movz above).
+    current >>= 16;
+    movk(cb, rd, A64Opnd::new_uimm(current & 0xffff), 16);
+    num_insns += 1;
+
+    if current > 0xffff {
         current >>= 16;
-        movk(cb, rd, A64Opnd::new_uimm(current & 0xffff), 16);
+        movk(cb, rd, A64Opnd::new_uimm(current & 0xffff), 32);
         num_insns += 1;
+    }
 
-        if current > 0xffff {
-            current >>= 16;
-            movk(cb, rd, A64Opnd::new_uimm(current & 0xffff), 32);
-            num_insns += 1;
-        }
-
-        if current > 0xffff {
-            current >>= 16;
-            movk(cb, rd, A64Opnd::new_uimm(current & 0xffff), 48);
-            num_insns += 1;
-        }
-        return num_insns;
+    if current > 0xffff {
+        current >>= 16;
+        movk(cb, rd, A64Opnd::new_uimm(current & 0xffff), 48);
+        num_insns += 1;
     }
+
+    num_insns
 }
 
 /// List of registers that can be used for register allocation.
 /// This has the same number of registers for x86_64 and arm64.
 /// SCRATCH0 and SCRATCH1 are excluded.
-pub const ALLOC_REGS: &'static [Reg] = &[
+pub const ALLOC_REGS: &[Reg] = &[
     X0_REG,
     X1_REG,
     X2_REG,
@@ -480,7 +485,7 @@ impl Assembler
                 // which is both the return value and first argument register
                 if !opnds.is_empty() {
                     let mut args: Vec<(Reg, Opnd)> = vec![];
-                    for (idx, opnd) in opnds.into_iter().enumerate().rev() {
+                    for (idx, opnd) in opnds.iter_mut().enumerate().rev() {
                         // If the value that we're sending is 0, then we can use
                         // the zero register, so in this case we'll just send
                         // a UImm of 0 along as the argument to the move.
@@ -1106,8 +1111,8 @@ impl Assembler
                         // be stored is first and the address is second. However in
                         // our IR we have the address first and the register second.
                         match dest_num_bits {
-                            64 | 32 => stur(cb, src.into(), dest.into()),
-                            16 => sturh(cb, src.into(), dest.into()),
+                            64 | 32 => stur(cb, src, dest),
+                            16 => sturh(cb, src, dest),
                             num_bits => panic!("unexpected dest num_bits: {} (src: {:#?}, dest: {:#?})", num_bits, src, dest),
                         }
                     },
@@ -1306,7 +1311,7 @@ impl Assembler
                     last_patch_pos = Some(cb.get_write_pos());
                 },
                 Insn::IncrCounter { mem, value } => {
-                    let label = cb.new_label("incr_counter_loop".to_string());
+                    let label = cb.new_label("incr_counter_loop".into());
                     cb.write_label(label);
 
                     ldaxr(cb, Self::SCRATCH0, mem.into());
@@ -1411,7 +1416,7 @@ impl Assembler
     ///
     /// If a, b, and c are all registers.
     fn merge_three_reg_mov(
-        live_ranges: &Vec<LiveRange>,
+        live_ranges: &[LiveRange],
         iterator: &mut std::iter::Peekable>,
         left: &Opnd,
         right: &Opnd,
@@ -1566,7 +1571,7 @@ mod tests {
 
     #[test]
     fn frame_setup_and_teardown() {
-        const THREE_REGS: &'static [Opnd] = &[Opnd::Reg(X19_REG), Opnd::Reg(X20_REG), Opnd::Reg(X21_REG)];
+        const THREE_REGS: &[Opnd] = &[Opnd::Reg(X19_REG), Opnd::Reg(X20_REG), Opnd::Reg(X21_REG)];
         // Test 3 preserved regs (odd), odd slot_count
         {
             let (mut asm, mut cb) = setup_asm();
@@ -1607,7 +1612,7 @@ mod tests {
 
         // Test 4 preserved regs (even), odd slot_count
         {
-            static FOUR_REGS: &'static [Opnd] = &[Opnd::Reg(X19_REG), Opnd::Reg(X20_REG), Opnd::Reg(X21_REG), Opnd::Reg(X22_REG)];
+            static FOUR_REGS: &[Opnd] = &[Opnd::Reg(X19_REG), Opnd::Reg(X20_REG), Opnd::Reg(X21_REG), Opnd::Reg(X22_REG)];
             let (mut asm, mut cb) = setup_asm();
             asm.frame_setup(FOUR_REGS, 3);
             asm.frame_teardown(FOUR_REGS);
diff --git a/zjit/src/backend/lir.rs b/zjit/src/backend/lir.rs
index 3263392cf6d371..6a6b8b5976bb64 100644
--- a/zjit/src/backend/lir.rs
+++ b/zjit/src/backend/lir.rs
@@ -16,7 +16,7 @@ pub use crate::backend::current::{
     C_ARG_OPNDS, C_RET_REG, C_RET_OPND,
 };
 
-pub static JIT_PRESERVED_REGS: &'static [Opnd] = &[CFP, SP, EC];
+pub static JIT_PRESERVED_REGS: &[Opnd] = &[CFP, SP, EC];
 
 // Memory operand base
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
@@ -97,8 +97,8 @@ impl Opnd
             assert!(base_reg.num_bits == 64);
             Opnd::Mem(Mem {
                 base: MemBase::Reg(base_reg.reg_no),
-                disp: disp,
-                num_bits: num_bits,
+                disp,
+                num_bits,
             })
         },
 
@@ -106,8 +106,8 @@ impl Opnd
             assert!(num_bits <= out_num_bits);
             Opnd::Mem(Mem {
                 base: MemBase::VReg(idx),
-                disp: disp,
-                num_bits: num_bits,
+                disp,
+                num_bits,
             })
         },
 
@@ -1354,7 +1354,7 @@ impl Assembler
         assert!(!name.contains(' '), "use underscores in label names, not spaces");
 
         let label = Label(self.label_names.len());
-        self.label_names.push(name.to_string());
+        self.label_names.push(name.into());
         Target::Label(label)
     }
 
@@ -1518,17 +1518,17 @@ impl Assembler
 
     // Shuffle register moves, sometimes adding extra moves using SCRATCH_REG,
     // so that they will not rewrite each other before they are used.
-    pub fn resolve_parallel_moves(old_moves: &Vec<(Reg, Opnd)>) -> Vec<(Reg, Opnd)> {
+    pub fn resolve_parallel_moves(old_moves: &[(Reg, Opnd)]) -> Vec<(Reg, Opnd)> {
         // Return the index of a move whose destination is not used as a source if any.
-        fn find_safe_move(moves: &Vec<(Reg, Opnd)>) -> Option<usize> {
+        fn find_safe_move(moves: &[(Reg, Opnd)]) -> Option<usize> {
             moves.iter().enumerate().find(|&(_, &(dest_reg, _))| {
                 moves.iter().all(|&(_, src_opnd)| src_opnd != Opnd::Reg(dest_reg))
             }).map(|(index, _)| index)
        }
 
         // Remove moves whose source and destination are the same
-        let mut old_moves: Vec<(Reg, Opnd)> = old_moves.clone().into_iter()
-            .filter(|&(reg, opnd)| Opnd::Reg(reg) != opnd).collect();
+        let mut old_moves: Vec<(Reg, Opnd)> = old_moves.iter()
+            .filter(|&(reg, opnd)| Opnd::Reg(*reg) != *opnd).copied().collect();
 
         let mut new_moves = vec![];
         while !old_moves.is_empty() {
@@ -1696,21 +1696,19 @@ impl Assembler
 
                 // Allocate a new register for this instruction if one is not
                 // already allocated.
                 if out_reg.is_none() {
-                    out_reg = match &insn {
-                        _ => match pool.alloc_reg(vreg_idx.unwrap()) {
-                            Some(reg) => Some(reg),
-                            None => {
-                                if get_option!(debug) {
-                                    let mut insns = asm.insns;
+                    out_reg = match pool.alloc_reg(vreg_idx.unwrap()) {
+                        Some(reg) => Some(reg),
+                        None => {
+                            if get_option!(debug) {
+                                let mut insns = asm.insns;
+                                insns.push(insn);
+                                for (_, insn) in iterator.by_ref() {
                                     insns.push(insn);
-                                    while let Some((_, insn)) = iterator.next() {
-                                        insns.push(insn);
-                                    }
-                                    dump_live_regs(insns, live_ranges, regs.len(), index);
                                 }
-                                debug!("Register spill not supported");
-                                return None;
+                                dump_live_regs(insns, live_ranges, regs.len(), index);
                             }
+                            debug!("Register spill not supported");
+                            return None;
                         }
                     };
                 }
@@ -1771,7 +1769,7 @@ impl Assembler
                 if is_ccall {
                     // On x86_64, maintain 16-byte stack alignment
                     if cfg!(target_arch = "x86_64") && saved_regs.len() % 2 == 1 {
-                        asm.cpop_into(Opnd::Reg(saved_regs.last().unwrap().0.clone()));
+                        asm.cpop_into(Opnd::Reg(saved_regs.last().unwrap().0));
                    }
                     // Restore saved registers
                     for &(reg, vreg_idx) in saved_regs.iter().rev() {
@@ -1830,7 +1828,7 @@ impl Assembler
         let side_exit_label = if let Some(label) = label {
             Target::Label(label)
         } else {
-            self.new_label("side_exit".into())
+            self.new_label("side_exit")
         };
         self.write_label(side_exit_label.clone());
 
@@ -1902,7 +1900,7 @@ impl Assembler {
     }
 
     pub fn bake_string(&mut self, text: &str) {
-        self.push_insn(Insn::BakeString(text.to_string()));
+        self.push_insn(Insn::BakeString(text.into()));
     }
 
     #[allow(dead_code)]
diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs
index 1d6901bac4a73f..7b9169ba27282d 100644
--- a/zjit/src/codegen.rs
+++ b/zjit/src/codegen.rs
@@ -185,7 +185,7 @@ fn gen_entry(cb: &mut CodeBlock, iseq: IseqPtr, function: &Function, function_pt
     gen_entry_params(&mut asm, iseq, function.block(BlockId(0)));
 
     // Jump to the first block using a call instruction
-    asm.ccall(function_ptr.raw_ptr(cb) as *const u8, vec![]);
+    asm.ccall(function_ptr.raw_ptr(cb), vec![]);
 
     // Restore registers for CFP, EC, and SP after use
     asm_comment!(asm, "return to the interpreter");
@@ -210,6 +210,7 @@ fn gen_entry(cb: &mut CodeBlock, iseq: IseqPtr, function: &Function, function_pt
 }
 
 /// Compile an ISEQ into machine code
+#[allow(clippy::type_complexity)]
 fn gen_iseq(cb: &mut CodeBlock, iseq: IseqPtr) -> Option<(CodePtr, Vec<(Rc<Branch>, IseqPtr)>)> {
     // Return an existing pointer if it's already compiled
     let payload = get_or_create_iseq_payload(iseq);
@@ -218,10 +219,7 @@ fn gen_iseq(cb: &mut CodeBlock, iseq: IseqPtr) -> Option<(CodePtr, Vec<(Rc<Branch>, IseqPtr)>)> {
-    let function = match compile_iseq(iseq) {
-        Some(function) => function,
-        None => return None,
-    };
+    let function = compile_iseq(iseq)?;
 
     // Compile the High-level IR
     let result = gen_function(cb, iseq, &function);
@@ -304,6 +302,7 @@ fn gen_function(cb: &mut CodeBlock, iseq: IseqPtr, function: &Function) -> Optio
 }
 
 /// Compile an instruction
+#[allow(clippy::unit_arg)]
 fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, function: &Function, insn_id: InsnId, insn: &Insn) -> Option<()> {
     // Convert InsnId to lir::Opnd
     macro_rules! opnd {
@@ -341,7 +340,7 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio
             gen_send_without_block(jit, asm, *cd, &function.frame_state(*state), opnd!(self_val), opnds!(args))?,
         Insn::SendWithoutBlockDirect { cme, iseq, self_val, args, state, .. } => gen_send_without_block_direct(cb, jit, asm, *cme, *iseq, opnd!(self_val), opnds!(args), &function.frame_state(*state))?,
         Insn::InvokeBuiltin { bf, args, state, .. } => gen_invokebuiltin(jit, asm, &function.frame_state(*state), bf, opnds!(args))?,
-        Insn::Return { val } => return Some(gen_return(asm, opnd!(val))?),
+        Insn::Return { val } => return gen_return(asm, opnd!(val)),
         Insn::FixnumAdd { left, right, state } => gen_fixnum_add(jit, asm, opnd!(left), opnd!(right), &function.frame_state(*state))?,
         Insn::FixnumSub { left, right, state } => gen_fixnum_sub(jit, asm, opnd!(left), opnd!(right), &function.frame_state(*state))?,
         Insn::FixnumMult { left, right, state } => gen_fixnum_mult(jit, asm, opnd!(left), opnd!(right), &function.frame_state(*state))?,
@@ -450,7 +449,7 @@ fn gen_defined(jit: &JITState, asm: &mut Assembler, op_type: usize, obj: VALUE,
         let block_handler = asm.load(Opnd::mem(64, lep, SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL));
         let pushval = asm.load(pushval.into());
         asm.cmp(block_handler, VM_BLOCK_HANDLER_NONE.into());
-        Some(asm.csel_e(Qnil.into(), pushval.into()))
+        Some(asm.csel_e(Qnil.into(), pushval))
     } else {
         Some(Qnil.into())
     }
@@ -533,7 +532,7 @@ fn gen_invokebuiltin(jit: &JITState, asm: &mut Assembler, state: &FrameState, bf
 fn gen_patch_point(jit: &mut JITState, asm: &mut Assembler, invariant: &Invariant, state: &FrameState) -> Option<()> {
     let payload_ptr = get_or_create_iseq_payload_ptr(jit.iseq);
     let label = asm.new_label("patch_point").unwrap_label();
-    let invariant = invariant.clone();
+    let invariant = *invariant;
 
     // Compile a side exit. Fill nop instructions if the last patch point is too close.
     asm.patch_point(build_side_exit(jit, state, PatchPoint(invariant), Some(label))?);
@@ -1123,7 +1122,7 @@ fn gen_guard_bit_equals(jit: &mut JITState, asm: &mut Assembler, val: lir::Opnd,
 }
 
 /// Generate code that increments a counter in ZJIT stats
-fn gen_incr_counter(asm: &mut Assembler, counter: Counter) -> () {
+fn gen_incr_counter(asm: &mut Assembler, counter: Counter) {
     let ptr = counter_ptr(counter);
     let ptr_reg = asm.load(Opnd::const_ptr(ptr as *const u8));
     let counter_opnd = Opnd::mem(64, ptr_reg, 0);
diff --git a/zjit/src/cruby.rs b/zjit/src/cruby.rs
index 095a2988f81c83..5e99d05893469f 100644
--- a/zjit/src/cruby.rs
+++ b/zjit/src/cruby.rs
@@ -103,7 +103,7 @@ pub type RedefinitionFlag = u32;
 
 #[allow(unsafe_op_in_unsafe_fn)]
 #[allow(dead_code)]
-#[allow(clippy::all)] // warning meant to help with reading; not useful for generated code
+#[allow(clippy::complexity)]
 mod autogened {
     use super::*;
     // Textually include output from rust-bindgen as suggested by its user guide.
@@ -230,7 +230,7 @@ pub fn insn_name(opcode: usize) -> String {
         let op_name = CStr::from_ptr(op_name).to_str().unwrap();
 
         // Convert into an owned string
-        op_name.to_string()
+        op_name.into()
     }
 }
 
@@ -696,23 +696,23 @@ pub fn rust_str_to_sym(str: &str) -> VALUE {
 
 /// Produce an owned Rust String from a C char pointer
 pub fn cstr_to_rust_string(c_char_ptr: *const c_char) -> Option<String> {
-    assert!(c_char_ptr != std::ptr::null());
+    assert!(!c_char_ptr.is_null());
 
     let c_str: &CStr = unsafe { CStr::from_ptr(c_char_ptr) };
 
     match c_str.to_str() {
-        Ok(rust_str) => Some(rust_str.to_string()),
+        Ok(rust_str) => Some(rust_str.into()),
         Err(_) => None
     }
 }
 
 pub fn iseq_name(iseq: IseqPtr) -> String {
     if iseq.is_null() {
-        return "".to_string();
+        return "".into();
     }
     let iseq_label = unsafe { rb_iseq_label(iseq) };
     if iseq_label == Qnil {
-        "None".to_string()
+        "None".into()
     } else {
         ruby_str_to_rust_string(iseq_label)
     }
@@ -726,13 +726,13 @@ pub fn iseq_get_location(iseq: IseqPtr, pos: u32) -> String {
     let iseq_lineno = unsafe { rb_iseq_line_no(iseq, pos as usize) };
 
     let mut s = iseq_name(iseq);
-    s.push_str("@");
+    s.push('@');
     if iseq_path == Qnil {
         s.push_str("None");
     } else {
         s.push_str(&ruby_str_to_rust_string(iseq_path));
     }
-    s.push_str(":");
+    s.push(':');
     s.push_str(&iseq_lineno.to_string());
     s
 }
@@ -745,10 +745,7 @@ fn ruby_str_to_rust_string(v: VALUE) -> String {
     let str_ptr = unsafe { rb_RSTRING_PTR(v) } as *mut u8;
     let str_len: usize = unsafe { rb_RSTRING_LEN(v) }.try_into().unwrap();
     let str_slice: &[u8] = unsafe { std::slice::from_raw_parts(str_ptr, str_len) };
-    match String::from_utf8(str_slice.to_vec()) {
-        Ok(utf8) => utf8,
-        Err(_) => String::new(),
-    }
+    String::from_utf8(str_slice.to_vec()).unwrap_or_default()
 }
 
 pub fn ruby_sym_to_rust_string(v: VALUE) -> String {
@@ -1077,7 +1074,7 @@ pub mod test_utils {
     /// Remove the minimum indent from every line, skipping the first and last lines if `trim_lines`.
     pub fn unindent(string: &str, trim_lines: bool) -> String {
         // Break up a string into multiple lines
-        let mut lines: Vec<String> = string.split_inclusive("\n").map(|s| s.to_string()).collect();
+        let mut lines: Vec<String> = string.split_inclusive('\n').map(|s| s.into()).collect();
         if trim_lines { // raw string literals come with extra lines
             lines.remove(0);
             lines.remove(lines.len() - 1);
         }
@@ -1184,7 +1181,7 @@ pub fn get_class_name(class: VALUE) -> String {
         None
     }.and_then(|class| unsafe {
         cstr_to_rust_string(rb_class2name(class))
-    }).unwrap_or_else(|| "Unknown".to_string())
+    }).unwrap_or_else(|| "Unknown".into())
 }
 
 /// Interned ID values for Ruby symbols and method names.
diff --git a/zjit/src/cruby_methods.rs b/zjit/src/cruby_methods.rs
index c9ebcebc86d81b..f4e4f2e50783d1 100644
--- a/zjit/src/cruby_methods.rs
+++ b/zjit/src/cruby_methods.rs
@@ -113,7 +113,7 @@ fn annotate_builtin_method(props_map: &mut HashMap<*mut c_void, FnProperties>, c
            opcode == YARVINSN_opt_invokebuiltin_delegate_leave as i32 {
             // The first operand is the builtin function pointer
             let bf_value = *pc.add(1);
-            let bf_ptr = bf_value.as_ptr() as *const rb_builtin_function;
+            let bf_ptr = bf_value.as_ptr::<rb_builtin_function>();
 
             if func_ptr.is_null() {
                 func_ptr = (*bf_ptr).func_ptr as *mut c_void;
diff --git a/zjit/src/disasm.rs b/zjit/src/disasm.rs
index 09864ef64994d9..404c493e527f6d 100644
--- a/zjit/src/disasm.rs
+++ b/zjit/src/disasm.rs
@@ -6,7 +6,7 @@ pub const BOLD_END: &str = "\x1b[22m";
 pub fn disasm_addr_range(cb: &CodeBlock, start_addr: usize, end_addr: usize) -> String {
     use std::fmt::Write;
 
-    let mut out = String::from("");
+    let mut out = String::new();
 
     // Initialize capstone
     use capstone::prelude::*;
diff --git a/zjit/src/distribution.rs b/zjit/src/distribution.rs
index 5927ffa5c944ba..02d44c75434341 100644
--- a/zjit/src/distribution.rs
+++ b/zjit/src/distribution.rs
@@ -105,7 +105,7 @@ impl Distributi
                 DistributionKind::Megamorphic
             }
         };
-        Self { kind, buckets: dist.buckets.clone() }
+        Self { kind, buckets: dist.buckets }
     }
 
     pub fn is_monomorphic(&self) -> bool {
diff --git a/zjit/src/hir.rs b/zjit/src/hir.rs
index 87d2a613d0e930..80b298392d89ac 100644
--- a/zjit/src/hir.rs
+++ b/zjit/src/hir.rs
@@ -19,9 +19,9 @@ use crate::profile::{TypeDistributionSummary, ProfiledType};
 #[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]
 pub struct InsnId(pub usize);
 
-impl Into<usize> for InsnId {
-    fn into(self) -> usize {
-        self.0
+impl From<InsnId> for usize {
+    fn from(val: InsnId) -> Self {
+        val.0
     }
 }
 
@@ -35,9 +35,9 @@ impl std::fmt::Display for InsnId {
 #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
 pub struct BlockId(pub usize);
 
-impl Into<usize> for BlockId {
-    fn into(self) -> usize {
-        self.0
+impl From<BlockId> for usize {
+    fn from(val: BlockId) -> Self {
+        val.0
     }
 }
 
@@ -573,22 +573,16 @@ pub enum Insn {
 impl Insn {
     /// Not every instruction returns a value. Return true if the instruction does and false otherwise.
     pub fn has_output(&self) -> bool {
-        match self {
-            Insn::Jump(_)
+        !matches!(self, Insn::Jump(_)
             | Insn::IfTrue { .. } | Insn::IfFalse { .. } | Insn::Return { .. } | Insn::PatchPoint { .. }
             | Insn::SetIvar { .. } | Insn::ArrayExtend { .. } | Insn::ArrayPush { .. } | Insn::SideExit { .. } | Insn::SetGlobal { .. }
-            | Insn::SetLocal { .. } | Insn::Throw { .. } | Insn::IncrCounter(_) => false,
-            _ => true,
-        }
+            | Insn::SetLocal { .. } | Insn::Throw { .. } | Insn::IncrCounter(_))
     }
 
     /// Return true if the instruction ends a basic block and false otherwise.
     pub fn is_terminator(&self) -> bool {
-        match self {
-            Insn::Jump(_) | Insn::Return { .. } | Insn::SideExit { .. } | Insn::Throw { .. } => true,
-            _ => false,
-        }
+        matches!(self, Insn::Jump(_) | Insn::Return { .. } | Insn::SideExit { .. } | Insn::Throw { .. })
     }
 
     pub fn print<'a>(&self, ptr_map: &'a PtrPrintMap) -> InsnPrinter<'a> {
@@ -898,7 +892,7 @@ impl<T: Into<usize> + PartialEq> UnionFind<T> {
     /// Private. Return the internal representation of the forwarding pointer for a given element.
     fn at(&self, idx: T) -> Option<T> {
-        self.forwarded.get(idx.into()).map(|x| *x).flatten()
+        self.forwarded.get(idx.into()).and_then(|x| *x)
     }
 
     /// Private. Set the internal representation of the forwarding pointer for the given element
@@ -963,6 +957,7 @@ pub enum ValidationError {
     DuplicateInstruction(BlockId, InsnId),
 }
 
+#[allow(clippy::needless_bool)]
 fn can_direct_send(iseq: *const rb_iseq_t) -> bool {
     if unsafe { rb_get_iseq_flags_has_rest(iseq) } { false }
     else if unsafe { rb_get_iseq_flags_has_opt(iseq) } { false }
@@ -1051,8 +1046,7 @@ impl Function {
     fn new_block(&mut self, insn_idx: u32) -> BlockId {
         let id = BlockId(self.blocks.len());
-        let mut block = Block::default();
-        block.insn_idx = insn_idx;
+        let block = Block { insn_idx, ..Default::default() };
         self.blocks.push(block);
         id
     }
@@ -1137,11 +1131,11 @@ impl Function {
             &StringIntern { val } => StringIntern { val: find!(val) },
             &Test { val } => Test { val: find!(val) },
             &IsNil { val } => IsNil { val: find!(val) },
-            &Jump(ref target) => Jump(find_branch_edge!(target)),
+            Jump(target) => Jump(find_branch_edge!(target)),
             &IfTrue { val, ref target } => IfTrue { val: find!(val), target: find_branch_edge!(target) },
             &IfFalse { val, ref target } => IfFalse { val: find!(val), target: find_branch_edge!(target) },
-            &GuardType { val, guard_type, state } => GuardType { val: find!(val), guard_type: guard_type, state },
-            &GuardBitEquals { val, expected, state } => GuardBitEquals { val: find!(val), expected: expected, state },
+            &GuardType { val, guard_type, state } => GuardType { val: find!(val), guard_type, state },
+            &GuardBitEquals { val, expected, state } => GuardBitEquals { val: find!(val), expected, state },
             &FixnumAdd { left, right, state } => FixnumAdd { left: find!(left), right: find!(right), state },
             &FixnumSub { left, right, state } => FixnumSub { left: find!(left), right: find!(right), state },
             &FixnumMult { left, right, state } => FixnumMult { left: find!(left), right: find!(right), state },
@@ -1157,7 +1151,7 @@ impl Function {
             &FixnumOr { left, right } => FixnumOr { left: find!(left), right: find!(right) },
             &ObjToString { val, cd, state } => ObjToString {
                 val: find!(val),
-                cd: cd,
+                cd,
                 state,
             },
             &AnyToString { val, str, state } => AnyToString {
                 val: find!(val),
                 str: find!(str),
                 state,
             },
@@ -1167,22 +1161,22 @@ impl Function {
             &SendWithoutBlock { self_val, cd, ref args, state } => SendWithoutBlock {
                 self_val: find!(self_val),
-                cd: cd,
+                cd,
                 args: find_vec!(args),
                 state,
             },
             &SendWithoutBlockDirect { self_val, cd, cme, iseq, ref args, state } => SendWithoutBlockDirect {
                 self_val: find!(self_val),
-                cd: cd,
-                cme: cme,
-                iseq: iseq,
+                cd,
+                cme,
+                iseq,
                 args: find_vec!(args),
                 state,
             },
             &Send { self_val, cd, blockiseq, ref args, state } => Send {
                 self_val: find!(self_val),
-                cd: cd,
-                blockiseq: blockiseq,
+                cd,
+                blockiseq,
                 args: find_vec!(args),
                 state,
             },
@@ -1391,19 +1385,19 @@ impl Function {
     }
 
     fn likely_is_fixnum(&self, val: InsnId, profiled_type: ProfiledType) -> bool {
-        return self.is_a(val, types::Fixnum) || profiled_type.is_fixnum();
+        self.is_a(val, types::Fixnum) || profiled_type.is_fixnum()
     }
 
     fn coerce_to_fixnum(&mut self, block: BlockId, val: InsnId, state: InsnId) -> InsnId {
         if self.is_a(val, types::Fixnum) { return val; }
-        return self.push_insn(block, Insn::GuardType { val, guard_type: types::Fixnum, state });
+        self.push_insn(block, Insn::GuardType { val, guard_type: types::Fixnum, state })
     }
 
     fn arguments_likely_fixnums(&mut self, left: InsnId, right: InsnId, state: InsnId) -> bool {
         let frame_state = self.frame_state(state);
-        let iseq_insn_idx = frame_state.insn_idx as usize;
-        let left_profiled_type = self.profiled_type_of_at(left, iseq_insn_idx).unwrap_or(ProfiledType::empty());
-        let right_profiled_type = self.profiled_type_of_at(right, iseq_insn_idx).unwrap_or(ProfiledType::empty());
+        let iseq_insn_idx = frame_state.insn_idx;
+        let left_profiled_type = self.profiled_type_of_at(left, iseq_insn_idx).unwrap_or_default();
+        let right_profiled_type = self.profiled_type_of_at(right, iseq_insn_idx).unwrap_or_default();
         self.likely_is_fixnum(left, left_profiled_type) && self.likely_is_fixnum(right, right_profiled_type)
     }
 
@@ -1524,9 +1518,9 @@ impl Function {
                 self.try_rewrite_fixnum_op(block, insn_id, &|left, right| Insn::FixnumAnd { left, right }, BOP_AND, self_val, args[0], state),
             Insn::SendWithoutBlock { self_val, args, state, cd, .. } if ruby_call_method_id(cd) == ID!(or) && args.len() == 1 =>
                 self.try_rewrite_fixnum_op(block, insn_id, &|left, right| Insn::FixnumOr { left, right }, BOP_OR, self_val, args[0], state),
-            Insn::SendWithoutBlock { self_val, args, state, cd, .. } if ruby_call_method_id(cd) == ID!(freeze) && args.len() == 0 =>
+            Insn::SendWithoutBlock { self_val, args, state, cd, .. } if ruby_call_method_id(cd) == ID!(freeze) && args.is_empty() =>
                 self.try_rewrite_freeze(block, insn_id, self_val, state),
-            Insn::SendWithoutBlock { self_val, args, state, cd, .. } if ruby_call_method_id(cd) == ID!(minusat) && args.len() == 0 =>
+            Insn::SendWithoutBlock { self_val, args, state, cd, .. } if ruby_call_method_id(cd) == ID!(minusat) && args.is_empty() =>
                 self.try_rewrite_uminus(block, insn_id, self_val, state),
             Insn::SendWithoutBlock { self_val, args, state, cd, .. } if ruby_call_method_id(cd) == ID!(aref) && args.len() == 1 =>
                 self.try_rewrite_aref(block, insn_id, self_val, args[0], state),
@@ -1904,7 +1898,7 @@ impl Function {
                     worklist.push_back(val);
                     worklist.push_back(state);
                 }
-                &Insn::Snapshot { ref state } => {
+                Insn::Snapshot { state } => {
                     worklist.extend(&state.stack);
                     worklist.extend(&state.locals);
                 }
@@ -1951,7 +1945,7 @@ impl Function {
                     worklist.extend(args);
                    worklist.push_back(state)
                 }
-                &Insn::CCall { ref args, .. } => worklist.extend(args),
+                Insn::CCall { args, .. } => worklist.extend(args),
                 &Insn::GetIvar { self_val, state, .. } | &Insn::DefinedIvar { self_val, state, .. } => {
                     worklist.push_back(self_val);
                     worklist.push_back(state);
                 }
@@ -2008,7 +2002,7 @@ impl Function {
         }
     }
 
-    fn absorb_dst_block(&mut self, num_in_edges: &Vec<u32>, block: BlockId) -> bool {
+    fn absorb_dst_block(&mut self, num_in_edges: &[u32], block: BlockId) -> bool {
         let Some(terminator_id) = self.blocks[block.0].insns.last()
             else { return false };
         let Insn::Jump(BranchEdge { target, args }) = self.find(*terminator_id)
@@ -2127,8 +2121,8 @@ impl Function {
         // Dump HIR after optimization
         match get_option!(dump_hir_opt) {
-            Some(DumpHIR::WithoutSnapshot) => println!("Optimized HIR:\n{}", FunctionPrinter::without_snapshot(&self)),
-            Some(DumpHIR::All) => println!("Optimized HIR:\n{}", FunctionPrinter::with_snapshot(&self)),
+            Some(DumpHIR::WithoutSnapshot) => println!("Optimized HIR:\n{}", FunctionPrinter::without_snapshot(self)),
+            Some(DumpHIR::All) => println!("Optimized HIR:\n{}", FunctionPrinter::with_snapshot(self)),
             Some(DumpHIR::Debug) => println!("Optimized HIR:\n{:#?}", &self),
             None => {},
         }
@@ -2504,7 +2498,7 @@ impl FrameState {
         // TODO: Modify the register allocator to allow reusing an argument
         // of another basic block.
         let mut args = vec![self_param];
-        args.extend(self.locals.iter().chain(self.stack.iter()).map(|op| *op));
+        args.extend(self.locals.iter().chain(self.stack.iter()).copied());
         args
     }
 }
 
@@ -2639,7 +2633,7 @@ impl ProfileOracle {
     fn profile_stack(&mut self, state: &FrameState) {
         let iseq_insn_idx = state.insn_idx;
         let Some(operand_types) = self.payload.profile.get_operand_types(iseq_insn_idx) else { return };
-        let entry = self.types.entry(iseq_insn_idx).or_insert_with(|| vec![]);
+        let entry = self.types.entry(iseq_insn_idx).or_default();
         // operand_types is always going to be <= stack size (otherwise it would have an underflow
         // at run-time) so use that to drive iteration.
         for (idx, insn_type_distribution) in operand_types.iter().rev().enumerate() {
diff --git a/zjit/src/hir_type/hir_type.inc.rs b/zjit/src/hir_type/hir_type.inc.rs
index 58508740800da3..2e03fdac96b8fc 100644
--- a/zjit/src/hir_type/hir_type.inc.rs
+++ b/zjit/src/hir_type/hir_type.inc.rs
@@ -66,7 +66,7 @@ mod bits {
   pub const Symbol: u64 = DynamicSymbol | StaticSymbol;
   pub const TrueClass: u64 = 1u64 << 40;
   pub const Undef: u64 = 1u64 << 41;
-  pub const AllBitPatterns: [(&'static str, u64); 66] = [
+  pub const AllBitPatterns: [(&str, u64); 66] = [
     ("Any", Any),
     ("RubyValue", RubyValue),
     ("Immediate", Immediate),
diff --git a/zjit/src/hir_type/mod.rs b/zjit/src/hir_type/mod.rs
index 607ccbde84bfae..fdd106948c1490 100644
--- a/zjit/src/hir_type/mod.rs
+++ b/zjit/src/hir_type/mod.rs
@@ -638,7 +638,7 @@ mod tests {
 
     #[test]
     fn integer_has_exact_ruby_class() {
-        assert_eq!(Type::fixnum(3).exact_ruby_class(), Some(unsafe { rb_cInteger }.into()));
+        assert_eq!(Type::fixnum(3).exact_ruby_class(), Some(unsafe { rb_cInteger }));
         assert_eq!(types::Fixnum.exact_ruby_class(), None);
         assert_eq!(types::Integer.exact_ruby_class(), None);
     }
@@ -665,7 +665,7 @@ mod tests {
 
     #[test]
     fn integer_has_ruby_class() {
-        assert_eq!(Type::fixnum(3).inexact_ruby_class(), Some(unsafe { rb_cInteger }.into()));
+        assert_eq!(Type::fixnum(3).inexact_ruby_class(), Some(unsafe { rb_cInteger }));
         assert_eq!(types::Fixnum.inexact_ruby_class(), None);
         assert_eq!(types::Integer.inexact_ruby_class(), None);
     }
diff --git a/zjit/src/options.rs b/zjit/src/options.rs
index 92f56b8916559f..b4f2a45c3709b8 100644
--- a/zjit/src/options.rs
+++ b/zjit/src/options.rs
@@ -75,7 +75,7 @@ impl Default for Options {
 
 /// `ruby --help` descriptions for user-facing options. Do not add options for ZJIT developers.
 /// Note that --help allows only 80 chars per line, including indentation. 80-char limit --> |
-pub const ZJIT_OPTIONS: &'static [(&str, &str)] = &[
+pub const ZJIT_OPTIONS: &[(&str, &str)] = &[
     ("--zjit-call-threshold=num", "Number of calls to trigger JIT (default: 2)."),
     ("--zjit-num-profiles=num", "Number of profiled calls before JIT (default: 1, max: 255)."),
     ("--zjit-stats", "Enable collecting ZJIT statistics."),
@@ -128,7 +128,7 @@ fn parse_jit_list(path_like: &str) -> HashSet<String> {
         for line in lines.lines() {
             let trimmed = line.trim();
             if !trimmed.is_empty() {
-                result.insert(trimmed.to_string());
+                result.insert(trimmed.into());
             }
         }
     } else {
diff --git a/zjit/src/state.rs b/zjit/src/state.rs
index 79be91fd85e5c6..613a0a239a2aa5 100644
--- a/zjit/src/state.rs
+++ b/zjit/src/state.rs
@@ -78,7 +78,7 @@ impl ZJITState {
             );
             let mem_block = Rc::new(RefCell::new(mem_block));
-            CodeBlock::new(mem_block.clone(), get_option!(dump_disasm))
+            CodeBlock::new(mem_block, get_option!(dump_disasm))
         };
         #[cfg(test)]
         let mut cb = CodeBlock::new_dummy();
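
Editorial appendix (not part of the patch): a minimal, standalone Rust sketch of the rewrite patterns this diff applies, one per newly-forbidden clippy lint family; all function and type names below are hypothetical. One note on the `[workspace.lints.clippy]` table above: Cargo applies entries in ascending `priority` order and higher values win conflicts, so the individually named `forbid` lints (default priority 0) override the `allow`ed groups pinned at priority -1, while the `forbid` groups at priority 1 take precedence over everything else.

// Hypothetical demo; each pair mirrors rewrites seen throughout the diff.

// clippy::ptr_arg: borrow a slice rather than &Vec, as in free_pages() above.
fn page_count_old(pages: &Vec<usize>) -> usize { pages.len() } // rejected by ptr_arg
fn page_count_new(pages: &[usize]) -> usize { pages.len() }    // accepted

// clippy::match_like_matches_macro: a two-arm bool match collapses to
// matches!(), as in A64Opnd::is_reg() above.
enum Opnd { None, Reg(u8) }
fn is_reg_old(opnd: &Opnd) -> bool {
    match opnd {
        Opnd::Reg(_) => true, // rejected by match_like_matches_macro
        _ => false,
    }
}
fn is_reg_new(opnd: &Opnd) -> bool { matches!(opnd, Opnd::Reg(_)) }

// clippy::str_to_string / manual_string_new: prefer to_owned() and
// String::new(), as in the block_comments() test and disasm_addr_range() above.
fn labels_old() -> (String, String) { ("loop".to_string(), "".to_string()) } // rejected
fn labels_new() -> (String, String) { ("loop".to_owned(), String::new()) }   // accepted

fn main() {
    // Each old/new pair is behavior-preserving; only the lint status differs.
    assert_eq!(page_count_old(&vec![1, 2]), page_count_new(&[1, 2]));
    assert!(is_reg_old(&Opnd::Reg(0)) && is_reg_new(&Opnd::Reg(0)));
    assert_eq!(labels_old(), labels_new());
}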