|
| 1 | +use crate::utils::{match_def_path, span_help_and_lint}; |
| 2 | +use if_chain::if_chain; |
| 3 | +use rustc::declare_lint_pass; |
| 4 | +use rustc::lint::{LateContext, LateLintPass, LintArray, LintPass}; |
| 5 | +use rustc::ty; |
| 6 | +use rustc_hir::def_id::DefId; |
| 7 | +use rustc_hir::*; |
| 8 | +use rustc_session::declare_tool_lint; |
| 9 | + |
declare_clippy_lint! {
    /// **What it does:** Checks for usage of invalid atomic
    /// ordering in `Atomic*::{load, store}` calls.
    ///
    /// **Why is this bad?** Using an invalid atomic ordering
    /// will cause a panic at run-time: `load` forbids `Release`
    /// and `AcqRel`, while `store` forbids `Acquire` and `AcqRel`.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust,no_run
    /// # use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let x = AtomicBool::new(true);
    ///
    /// // Both of these load orderings panic at run-time:
    /// let _ = x.load(Ordering::Release);
    /// let _ = x.load(Ordering::AcqRel);
    ///
    /// // Both of these store orderings panic at run-time:
    /// x.store(false, Ordering::Acquire);
    /// x.store(false, Ordering::AcqRel);
    /// ```
    pub INVALID_ATOMIC_ORDERING,
    correctness,
    "usage of invalid atomic ordering in atomic load/store calls"
}
| 35 | + |
// Stateless lint pass wrapping the single INVALID_ATOMIC_ORDERING lint.
declare_lint_pass!(AtomicOrdering => [INVALID_ATOMIC_ORDERING]);
| 37 | + |
// Type names of all atomic types in `core::sync::atomic` whose `load`/`store`
// methods take an `Ordering` argument; used to recognize receivers in
// `type_is_atomic`.
const ATOMIC_TYPES: [&str; 12] = [
    "AtomicBool",
    "AtomicI8",
    "AtomicI16",
    "AtomicI32",
    "AtomicI64",
    "AtomicIsize",
    "AtomicPtr",
    "AtomicU8",
    "AtomicU16",
    "AtomicU32",
    "AtomicU64",
    "AtomicUsize",
];
| 52 | + |
| 53 | +fn type_is_atomic(cx: &LateContext<'_, '_>, expr: &Expr<'_>) -> bool { |
| 54 | + if let ty::Adt(&ty::AdtDef { did, .. }, _) = cx.tables.expr_ty(expr).kind { |
| 55 | + ATOMIC_TYPES |
| 56 | + .iter() |
| 57 | + .any(|ty| match_def_path(cx, did, &["core", "sync", "atomic", ty])) |
| 58 | + } else { |
| 59 | + false |
| 60 | + } |
| 61 | +} |
| 62 | + |
| 63 | +fn match_ordering_def_path(cx: &LateContext<'_, '_>, did: DefId, orderings: &[&str]) -> bool { |
| 64 | + orderings |
| 65 | + .iter() |
| 66 | + .any(|ordering| match_def_path(cx, did, &["core", "sync", "atomic", "Ordering", ordering])) |
| 67 | +} |
| 68 | + |
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for AtomicOrdering {
    // Flags `Atomic*::load`/`store` calls whose `Ordering` argument would
    // panic at run-time (`Release`/`AcqRel` on load, `Acquire`/`AcqRel` on store).
    fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr<'_>) {
        if_chain! {
            // Only method calls; `args[0]` is the receiver.
            if let ExprKind::MethodCall(ref method_path, _, args) = &expr.kind;
            let method = method_path.ident.name.as_str();
            if type_is_atomic(cx, &args[0]);
            if method == "load" || method == "store";
            // `load(ordering)` vs `store(value, ordering)` — the ordering sits
            // at a different argument index for each method.
            let ordering_arg = if method == "load" { &args[1] } else { &args[2] };
            // Only a literal path like `Ordering::Release` is checked; an
            // ordering passed through a variable is not resolved here.
            if let ExprKind::Path(ref ordering_qpath) = ordering_arg.kind;
            if let Some(ordering_def_id) = cx.tables.qpath_res(ordering_qpath, ordering_arg.hir_id).opt_def_id();
            then {
                if method == "load" &&
                    match_ordering_def_path(cx, ordering_def_id, &["Release", "AcqRel"]) {
                    span_help_and_lint(
                        cx,
                        INVALID_ATOMIC_ORDERING,
                        ordering_arg.span,
                        "atomic loads cannot have `Release` and `AcqRel` ordering",
                        "consider using ordering modes `Acquire`, `SeqCst` or `Relaxed`"
                    );
                } else if method == "store" &&
                    match_ordering_def_path(cx, ordering_def_id, &["Acquire", "AcqRel"]) {
                    span_help_and_lint(
                        cx,
                        INVALID_ATOMIC_ORDERING,
                        ordering_arg.span,
                        "atomic stores cannot have `Acquire` and `AcqRel` ordering",
                        "consider using ordering modes `Release`, `SeqCst` or `Relaxed`"
                    );
                }
            }
        }
    }
}
0 commit comments