Skip to content

Commit 8155de7

Browse files
committed
deny unreachable-pub
1 parent 487778b commit 8155de7

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

54 files changed

+423
-424
lines changed

rayon-core/src/job.rs

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ use std::cell::UnsafeCell;
55
use std::mem;
66
use unwind;
77

8-
pub enum JobResult<T> {
8+
pub(super) enum JobResult<T> {
99
None,
1010
Ok(T),
1111
Panic(Box<Any + Send>),
@@ -16,7 +16,7 @@ pub enum JobResult<T> {
1616
/// arranged in a deque, so that thieves can take from the top of the
1717
/// deque while the main worker manages the bottom of the deque. This
1818
/// deque is managed by the `thread_pool` module.
19-
pub trait Job {
19+
pub(super) trait Job {
2020
/// Unsafe: this may be called from a different thread than the one
2121
/// which scheduled the job, so the implementer must ensure the
2222
/// appropriate traits are met, whether `Send`, `Sync`, or both.
@@ -30,7 +30,7 @@ pub trait Job {
3030
/// true type is something like `*const StackJob<...>`, but we hide
3131
/// it. We also carry the "execute fn" from the `Job` trait.
3232
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
33-
pub struct JobRef {
33+
pub(super) struct JobRef {
3434
pointer: *const (),
3535
execute_fn: unsafe fn(*const ()),
3636
}
@@ -41,7 +41,7 @@ unsafe impl Sync for JobRef {}
4141
impl JobRef {
4242
/// Unsafe: caller asserts that `data` will remain valid until the
4343
/// job is executed.
44-
pub unsafe fn new<T>(data: *const T) -> JobRef
44+
pub(super) unsafe fn new<T>(data: *const T) -> JobRef
4545
where
4646
T: Job,
4747
{
@@ -55,7 +55,7 @@ impl JobRef {
5555
}
5656

5757
#[inline]
58-
pub unsafe fn execute(&self) {
58+
pub(super) unsafe fn execute(&self) {
5959
(self.execute_fn)(self.pointer)
6060
}
6161
}
@@ -64,13 +64,13 @@ impl JobRef {
6464
/// executes it need not free any heap data, the cleanup occurs when
6565
/// the stack frame is later popped. The function parameter indicates
6666
/// `true` if the job was stolen -- executed on a different thread.
67-
pub struct StackJob<L, F, R>
67+
pub(super) struct StackJob<L, F, R>
6868
where
6969
L: Latch + Sync,
7070
F: FnOnce(bool) -> R + Send,
7171
R: Send,
7272
{
73-
pub latch: L,
73+
pub(super) latch: L,
7474
func: UnsafeCell<Option<F>>,
7575
result: UnsafeCell<JobResult<R>>,
7676
}
@@ -81,23 +81,23 @@ where
8181
F: FnOnce(bool) -> R + Send,
8282
R: Send,
8383
{
84-
pub fn new(func: F, latch: L) -> StackJob<L, F, R> {
84+
pub(super) fn new(func: F, latch: L) -> StackJob<L, F, R> {
8585
StackJob {
8686
latch,
8787
func: UnsafeCell::new(Some(func)),
8888
result: UnsafeCell::new(JobResult::None),
8989
}
9090
}
9191

92-
pub unsafe fn as_job_ref(&self) -> JobRef {
92+
pub(super) unsafe fn as_job_ref(&self) -> JobRef {
9393
JobRef::new(self)
9494
}
9595

96-
pub unsafe fn run_inline(self, stolen: bool) -> R {
96+
pub(super) unsafe fn run_inline(self, stolen: bool) -> R {
9797
self.func.into_inner().unwrap()(stolen)
9898
}
9999

100-
pub unsafe fn into_result(self) -> R {
100+
pub(super) unsafe fn into_result(self) -> R {
101101
self.result.into_inner().into_return_value()
102102
}
103103
}
@@ -127,7 +127,7 @@ where
127127
/// signal that the job executed.
128128
///
129129
/// (Probably `StackJob` should be refactored in a similar fashion.)
130-
pub struct HeapJob<BODY>
130+
pub(super) struct HeapJob<BODY>
131131
where
132132
BODY: FnOnce() + Send,
133133
{
@@ -138,7 +138,7 @@ impl<BODY> HeapJob<BODY>
138138
where
139139
BODY: FnOnce() + Send,
140140
{
141-
pub fn new(func: BODY) -> Self {
141+
pub(super) fn new(func: BODY) -> Self {
142142
HeapJob {
143143
job: UnsafeCell::new(Some(func)),
144144
}
@@ -147,7 +147,7 @@ where
147147
/// Creates a `JobRef` from this job -- note that this hides all
148148
/// lifetimes, so it is up to you to ensure that this JobRef
149149
/// doesn't outlive any data that it closes over.
150-
pub unsafe fn as_job_ref(self: Box<Self>) -> JobRef {
150+
pub(super) unsafe fn as_job_ref(self: Box<Self>) -> JobRef {
151151
let this: *const Self = mem::transmute(self);
152152
JobRef::new(this)
153153
}
@@ -169,7 +169,7 @@ impl<T> JobResult<T> {
169169
/// its JobResult is populated) into its return value.
170170
///
171171
/// NB. This will panic if the job panicked.
172-
pub fn into_return_value(self) -> T {
172+
pub(super) fn into_return_value(self) -> T {
173173
match self {
174174
JobResult::None => unreachable!(),
175175
JobResult::Ok(x) => x,
@@ -179,18 +179,18 @@ impl<T> JobResult<T> {
179179
}
180180

181181
/// Indirect queue to provide FIFO job priority.
182-
pub struct JobFifo {
182+
pub(super) struct JobFifo {
183183
inner: SegQueue<JobRef>,
184184
}
185185

186186
impl JobFifo {
187-
pub fn new() -> Self {
187+
pub(super) fn new() -> Self {
188188
JobFifo {
189189
inner: SegQueue::new(),
190190
}
191191
}
192192

193-
pub unsafe fn push(&self, job_ref: JobRef) -> JobRef {
193+
pub(super) unsafe fn push(&self, job_ref: JobRef) -> JobRef {
194194
// A little indirection ensures that spawns are always prioritized in FIFO order. The
195195
// jobs in a thread's deque may be popped from the back (LIFO) or stolen from the front
196196
// (FIFO), but either way they will end up popping from the front of this queue.

rayon-core/src/latch.rs

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -30,26 +30,26 @@ use sleep::Sleep;
3030
/// - Once `set()` occurs, the next `probe()` *will* observe it. This
3131
/// typically requires a seq-cst ordering. See [the "tickle-then-get-sleepy" scenario in the sleep
3232
/// README](/src/sleep/README.md#tickle-then-get-sleepy) for details.
33-
pub trait Latch: LatchProbe {
33+
pub(super) trait Latch: LatchProbe {
3434
/// Set the latch, signalling others.
3535
fn set(&self);
3636
}
3737

38-
pub trait LatchProbe {
38+
pub(super) trait LatchProbe {
3939
/// Test if the latch is set.
4040
fn probe(&self) -> bool;
4141
}
4242

4343
/// Spin latches are the simplest, most efficient kind, but they do
4444
/// not support a `wait()` operation. They just have a boolean flag
4545
/// that becomes true when `set()` is called.
46-
pub struct SpinLatch {
46+
pub(super) struct SpinLatch {
4747
b: AtomicBool,
4848
}
4949

5050
impl SpinLatch {
5151
#[inline]
52-
pub fn new() -> SpinLatch {
52+
pub(super) fn new() -> SpinLatch {
5353
SpinLatch {
5454
b: AtomicBool::new(false),
5555
}
@@ -72,22 +72,22 @@ impl Latch for SpinLatch {
7272

7373
/// A Latch starts as false and eventually becomes true. You can block
7474
/// until it becomes true.
75-
pub struct LockLatch {
75+
pub(super) struct LockLatch {
7676
m: Mutex<bool>,
7777
v: Condvar,
7878
}
7979

8080
impl LockLatch {
8181
#[inline]
82-
pub fn new() -> LockLatch {
82+
pub(super) fn new() -> LockLatch {
8383
LockLatch {
8484
m: Mutex::new(false),
8585
v: Condvar::new(),
8686
}
8787
}
8888

8989
/// Block until latch is set.
90-
pub fn wait(&self) {
90+
pub(super) fn wait(&self) {
9191
let mut guard = self.m.lock().unwrap();
9292
while !*guard {
9393
guard = self.v.wait(guard).unwrap();
@@ -119,20 +119,20 @@ impl Latch for LockLatch {
119119
/// decrements the counter. The latch is only "set" (in the sense that
120120
/// `probe()` returns true) once the counter reaches zero.
121121
#[derive(Debug)]
122-
pub struct CountLatch {
122+
pub(super) struct CountLatch {
123123
counter: AtomicUsize,
124124
}
125125

126126
impl CountLatch {
127127
#[inline]
128-
pub fn new() -> CountLatch {
128+
pub(super) fn new() -> CountLatch {
129129
CountLatch {
130130
counter: AtomicUsize::new(1),
131131
}
132132
}
133133

134134
#[inline]
135-
pub fn increment(&self) {
135+
pub(super) fn increment(&self) {
136136
debug_assert!(!self.probe());
137137
self.counter.fetch_add(1, Ordering::Relaxed);
138138
}
@@ -157,14 +157,14 @@ impl Latch for CountLatch {
157157
/// A tickling latch wraps another latch type, and will also awaken a thread
158158
/// pool when it is set. This is useful for jobs injected between thread pools,
159159
/// so the source pool can continue processing its own work while waiting.
160-
pub struct TickleLatch<'a, L: Latch> {
160+
pub(super) struct TickleLatch<'a, L: Latch> {
161161
inner: L,
162162
sleep: &'a Sleep,
163163
}
164164

165165
impl<'a, L: Latch> TickleLatch<'a, L> {
166166
#[inline]
167-
pub fn new(latch: L, sleep: &'a Sleep) -> Self {
167+
pub(super) fn new(latch: L, sleep: &'a Sleep) -> Self {
168168
TickleLatch {
169169
inner: latch,
170170
sleep,

rayon-core/src/lib.rs

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
#![doc(html_root_url = "https://docs.rs/rayon-core/1.5")]
2323
#![deny(missing_debug_implementations)]
2424
#![deny(missing_docs)]
25+
#![deny(unreachable_pub)]
2526

2627
use std::any::Any;
2728
use std::env;
@@ -180,7 +181,7 @@ impl ThreadPoolBuilder {
180181

181182
/// Create a new `ThreadPool` initialized using this configuration.
182183
pub fn build(self) -> Result<ThreadPool, ThreadPoolBuildError> {
183-
thread_pool::build(self)
184+
ThreadPool::build(self)
184185
}
185186

186187
/// Initializes the global thread pool. This initialization is

rayon-core/src/log.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
use std::env;
1212

1313
#[derive(Debug)]
14-
pub enum Event {
14+
pub(super) enum Event {
1515
Tickle {
1616
worker: usize,
1717
old_state: usize,
@@ -87,10 +87,10 @@ pub enum Event {
8787
},
8888
}
8989

90-
pub const DUMP_LOGS: bool = cfg!(debug_assertions);
90+
pub(super) const DUMP_LOGS: bool = cfg!(debug_assertions);
9191

9292
lazy_static! {
93-
pub static ref LOG_ENV: bool =
93+
pub(super) static ref LOG_ENV: bool =
9494
env::var("RAYON_LOG").is_ok() || env::var("RAYON_RS_LOG").is_ok();
9595
}
9696

0 commit comments

Comments (0)