Skip to content

Commit fb803a8

Browse files
committed
Require types to opt-in Sync
1 parent c43efee commit fb803a8

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

48 files changed

+742
-452
lines changed

src/liballoc/arc.rs

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,10 @@ pub struct Weak<T> {
129129
_ptr: *mut ArcInner<T>,
130130
}
131131

132+
impl<T: Sync + Send> Send for Arc<T> { }
133+
134+
impl<T: Sync + Send> Sync for Arc<T> { }
135+
132136
struct ArcInner<T> {
133137
strong: atomic::AtomicUint,
134138
weak: atomic::AtomicUint,

src/liballoc/boxed.rs

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ use core::hash::{mod, Hash};
1919
use core::kinds::Sized;
2020
use core::mem;
2121
use core::option::Option;
22+
use core::ptr::OwnedPtr;
2223
use core::raw::TraitObject;
2324
use core::result::Result;
2425
use core::result::Result::{Ok, Err};
@@ -44,7 +45,7 @@ pub static HEAP: () = ();
4445
/// A type that represents a uniquely-owned value.
4546
#[lang = "owned_box"]
4647
#[unstable = "custom allocators will add an additional type parameter (with default)"]
47-
pub struct Box<T>(*mut T);
48+
pub struct Box<T>(OwnedPtr<T>);
4849

4950
#[stable]
5051
impl<T: Default> Default for Box<T> {

src/libcollections/vec.rs

Lines changed: 28 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ use core::kinds::marker::{ContravariantLifetime, InvariantType};
5858
use core::mem;
5959
use core::num::{Int, UnsignedInt};
6060
use core::ops;
61-
use core::ptr;
61+
use core::ptr::{mod, OwnedPtr};
6262
use core::raw::Slice as RawSlice;
6363
use core::uint;
6464

@@ -133,7 +133,7 @@ use slice::CloneSliceExt;
133133
#[unsafe_no_drop_flag]
134134
#[stable]
135135
pub struct Vec<T> {
136-
ptr: *mut T,
136+
ptr: OwnedPtr<T>,
137137
len: uint,
138138
cap: uint,
139139
}
@@ -176,7 +176,7 @@ impl<T> Vec<T> {
176176
// non-null value which is fine since we never call deallocate on the ptr
177177
// if cap is 0. The reason for this is because the pointer of a slice
178178
// being NULL would break the null pointer optimization for enums.
179-
Vec { ptr: EMPTY as *mut T, len: 0, cap: 0 }
179+
Vec { ptr: OwnedPtr(EMPTY as *mut T), len: 0, cap: 0 }
180180
}
181181

182182
/// Constructs a new, empty `Vec<T>` with the specified capacity.
@@ -209,15 +209,15 @@ impl<T> Vec<T> {
209209
#[stable]
210210
pub fn with_capacity(capacity: uint) -> Vec<T> {
211211
if mem::size_of::<T>() == 0 {
212-
Vec { ptr: EMPTY as *mut T, len: 0, cap: uint::MAX }
212+
Vec { ptr: OwnedPtr(EMPTY as *mut T), len: 0, cap: uint::MAX }
213213
} else if capacity == 0 {
214214
Vec::new()
215215
} else {
216216
let size = capacity.checked_mul(mem::size_of::<T>())
217217
.expect("capacity overflow");
218218
let ptr = unsafe { allocate(size, mem::min_align_of::<T>()) };
219219
if ptr.is_null() { ::alloc::oom() }
220-
Vec { ptr: ptr as *mut T, len: 0, cap: capacity }
220+
Vec { ptr: OwnedPtr(ptr as *mut T), len: 0, cap: capacity }
221221
}
222222
}
223223

@@ -284,7 +284,7 @@ impl<T> Vec<T> {
284284
#[unstable = "needs finalization"]
285285
pub unsafe fn from_raw_parts(ptr: *mut T, length: uint,
286286
capacity: uint) -> Vec<T> {
287-
Vec { ptr: ptr, len: length, cap: capacity }
287+
Vec { ptr: OwnedPtr(ptr), len: length, cap: capacity }
288288
}
289289

290290
/// Creates a vector by copying the elements from a raw pointer.
@@ -795,19 +795,19 @@ impl<T> Vec<T> {
795795
if self.len == 0 {
796796
if self.cap != 0 {
797797
unsafe {
798-
dealloc(self.ptr, self.cap)
798+
dealloc(self.ptr.0, self.cap)
799799
}
800800
self.cap = 0;
801801
}
802802
} else {
803803
unsafe {
804804
// Overflow check is unnecessary as the vector is already at
805805
// least this large.
806-
self.ptr = reallocate(self.ptr as *mut u8,
807-
self.cap * mem::size_of::<T>(),
808-
self.len * mem::size_of::<T>(),
809-
mem::min_align_of::<T>()) as *mut T;
810-
if self.ptr.is_null() { ::alloc::oom() }
806+
self.ptr = OwnedPtr(reallocate(self.ptr.0 as *mut u8,
807+
self.cap * mem::size_of::<T>(),
808+
self.len * mem::size_of::<T>(),
809+
mem::min_align_of::<T>()) as *mut T);
810+
if self.ptr.0.is_null() { ::alloc::oom() }
811811
}
812812
self.cap = self.len;
813813
}
@@ -867,7 +867,7 @@ impl<T> Vec<T> {
867867
pub fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T] {
868868
unsafe {
869869
mem::transmute(RawSlice {
870-
data: self.ptr as *const T,
870+
data: self.ptr.0 as *const T,
871871
len: self.len,
872872
})
873873
}
@@ -890,9 +890,9 @@ impl<T> Vec<T> {
890890
#[unstable = "matches collection reform specification, waiting for dust to settle"]
891891
pub fn into_iter(self) -> IntoIter<T> {
892892
unsafe {
893-
let ptr = self.ptr;
893+
let ptr = self.ptr.0;
894894
let cap = self.cap;
895-
let begin = self.ptr as *const T;
895+
let begin = self.ptr.0 as *const T;
896896
let end = if mem::size_of::<T>() == 0 {
897897
(ptr as uint + self.len()) as *const T
898898
} else {
@@ -1110,14 +1110,14 @@ impl<T> Vec<T> {
11101110
let size = max(old_size, 2 * mem::size_of::<T>()) * 2;
11111111
if old_size > size { panic!("capacity overflow") }
11121112
unsafe {
1113-
self.ptr = alloc_or_realloc(self.ptr, old_size, size);
1114-
if self.ptr.is_null() { ::alloc::oom() }
1113+
self.ptr = OwnedPtr(alloc_or_realloc(self.ptr.0, old_size, size));
1114+
if self.ptr.0.is_null() { ::alloc::oom() }
11151115
}
11161116
self.cap = max(self.cap, 2) * 2;
11171117
}
11181118

11191119
unsafe {
1120-
let end = (self.ptr as *const T).offset(self.len as int) as *mut T;
1120+
let end = self.ptr.0.offset(self.len as int);
11211121
ptr::write(&mut *end, value);
11221122
self.len += 1;
11231123
}
@@ -1162,11 +1162,11 @@ impl<T> Vec<T> {
11621162
#[unstable = "matches collection reform specification, waiting for dust to settle"]
11631163
pub fn drain<'a>(&'a mut self) -> Drain<'a, T> {
11641164
unsafe {
1165-
let begin = self.ptr as *const T;
1165+
let begin = self.ptr.0 as *const T;
11661166
let end = if mem::size_of::<T>() == 0 {
1167-
(self.ptr as uint + self.len()) as *const T
1167+
(self.ptr.0 as uint + self.len()) as *const T
11681168
} else {
1169-
self.ptr.offset(self.len() as int) as *const T
1169+
self.ptr.0.offset(self.len() as int) as *const T
11701170
};
11711171
self.set_len(0);
11721172
Drain {
@@ -1231,8 +1231,10 @@ impl<T> Vec<T> {
12311231
let size = capacity.checked_mul(mem::size_of::<T>())
12321232
.expect("capacity overflow");
12331233
unsafe {
1234-
self.ptr = alloc_or_realloc(self.ptr, self.cap * mem::size_of::<T>(), size);
1235-
if self.ptr.is_null() { ::alloc::oom() }
1234+
self.ptr = OwnedPtr(alloc_or_realloc(self.ptr.0,
1235+
self.cap * mem::size_of::<T>(),
1236+
size));
1237+
if self.ptr.0.is_null() { ::alloc::oom() }
12361238
}
12371239
self.cap = capacity;
12381240
}
@@ -1355,7 +1357,7 @@ impl<T> AsSlice<T> for Vec<T> {
13551357
fn as_slice<'a>(&'a self) -> &'a [T] {
13561358
unsafe {
13571359
mem::transmute(RawSlice {
1358-
data: self.ptr as *const T,
1360+
data: self.ptr.0 as *const T,
13591361
len: self.len
13601362
})
13611363
}
@@ -1380,7 +1382,7 @@ impl<T> Drop for Vec<T> {
13801382
for x in self.iter() {
13811383
ptr::read(x);
13821384
}
1383-
dealloc(self.ptr, self.cap)
1385+
dealloc(self.ptr.0, self.cap)
13841386
}
13851387
}
13861388
}
@@ -1418,7 +1420,7 @@ impl<T> IntoIter<T> {
14181420
for _x in self { }
14191421
let IntoIter { allocation, cap, ptr: _ptr, end: _end } = self;
14201422
mem::forget(self);
1421-
Vec { ptr: allocation, cap: cap, len: 0 }
1423+
Vec { ptr: OwnedPtr(allocation), cap: cap, len: 0 }
14221424
}
14231425
}
14241426

src/libcore/atomic.rs

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -15,30 +15,30 @@
1515
pub use self::Ordering::*;
1616

1717
use intrinsics;
18-
use cell::UnsafeCell;
18+
use cell::{UnsafeCell, RacyCell};
1919

2020
/// A boolean type which can be safely shared between threads.
2121
#[stable]
2222
pub struct AtomicBool {
23-
v: UnsafeCell<uint>,
23+
v: RacyCell<uint>,
2424
}
2525

2626
/// A signed integer type which can be safely shared between threads.
2727
#[stable]
2828
pub struct AtomicInt {
29-
v: UnsafeCell<int>,
29+
v: RacyCell<int>,
3030
}
3131

3232
/// An unsigned integer type which can be safely shared between threads.
3333
#[stable]
3434
pub struct AtomicUint {
35-
v: UnsafeCell<uint>,
35+
v: RacyCell<uint>,
3636
}
3737

3838
/// A raw pointer type which can be safely shared between threads.
3939
#[stable]
4040
pub struct AtomicPtr<T> {
41-
p: UnsafeCell<uint>,
41+
p: RacyCell<uint>,
4242
}
4343

4444
/// Atomic memory orderings
@@ -80,15 +80,15 @@ pub enum Ordering {
8080
/// An `AtomicBool` initialized to `false`.
8181
#[unstable = "may be renamed, pending conventions for static initalizers"]
8282
pub const INIT_ATOMIC_BOOL: AtomicBool =
83-
AtomicBool { v: UnsafeCell { value: 0 } };
83+
AtomicBool { v: RacyCell(UnsafeCell { value: 0 }) };
8484
/// An `AtomicInt` initialized to `0`.
8585
#[unstable = "may be renamed, pending conventions for static initalizers"]
8686
pub const INIT_ATOMIC_INT: AtomicInt =
87-
AtomicInt { v: UnsafeCell { value: 0 } };
87+
AtomicInt { v: RacyCell(UnsafeCell { value: 0 }) };
8888
/// An `AtomicUint` initialized to `0`.
8989
#[unstable = "may be renamed, pending conventions for static initalizers"]
9090
pub const INIT_ATOMIC_UINT: AtomicUint =
91-
AtomicUint { v: UnsafeCell { value: 0, } };
91+
AtomicUint { v: RacyCell(UnsafeCell { value: 0 }) };
9292

9393
// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
9494
const UINT_TRUE: uint = -1;
@@ -108,7 +108,7 @@ impl AtomicBool {
108108
#[stable]
109109
pub fn new(v: bool) -> AtomicBool {
110110
let val = if v { UINT_TRUE } else { 0 };
111-
AtomicBool { v: UnsafeCell::new(val) }
111+
AtomicBool { v: RacyCell::new(val) }
112112
}
113113

114114
/// Loads a value from the bool.
@@ -348,7 +348,7 @@ impl AtomicInt {
348348
#[inline]
349349
#[stable]
350350
pub fn new(v: int) -> AtomicInt {
351-
AtomicInt {v: UnsafeCell::new(v)}
351+
AtomicInt {v: RacyCell::new(v)}
352352
}
353353

354354
/// Loads a value from the int.
@@ -534,7 +534,7 @@ impl AtomicUint {
534534
#[inline]
535535
#[stable]
536536
pub fn new(v: uint) -> AtomicUint {
537-
AtomicUint { v: UnsafeCell::new(v) }
537+
AtomicUint { v: RacyCell::new(v) }
538538
}
539539

540540
/// Loads a value from the uint.
@@ -721,7 +721,7 @@ impl<T> AtomicPtr<T> {
721721
#[inline]
722722
#[stable]
723723
pub fn new(p: *mut T) -> AtomicPtr<T> {
724-
AtomicPtr { p: UnsafeCell::new(p as uint) }
724+
AtomicPtr { p: RacyCell::new(p as uint) }
725725
}
726726

727727
/// Loads a value from the pointer.

src/libcore/cell.rs

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,7 @@
158158
use clone::Clone;
159159
use cmp::PartialEq;
160160
use default::Default;
161-
use kinds::{marker, Copy};
161+
use kinds::{marker, Copy, Send, Sync};
162162
use ops::{Deref, DerefMut, Drop};
163163
use option::Option;
164164
use option::Option::{None, Some};
@@ -555,3 +555,28 @@ impl<T> UnsafeCell<T> {
555555
#[deprecated = "renamed to into_inner()"]
556556
pub unsafe fn unwrap(self) -> T { self.into_inner() }
557557
}
558+
559+
/// A version of `UnsafeCell` intended for use in concurrent data
560+
/// structures (for example, you might put it in an `Arc`).
561+
pub struct RacyCell<T>(pub UnsafeCell<T>);
562+
563+
impl<T> RacyCell<T> {
564+
/// DOX
565+
pub fn new(value: T) -> RacyCell<T> {
566+
RacyCell(UnsafeCell { value: value })
567+
}
568+
569+
/// DOX
570+
pub unsafe fn get(&self) -> *mut T {
571+
self.0.get()
572+
}
573+
574+
/// DOX
575+
pub unsafe fn into_inner(self) -> T {
576+
self.0.into_inner()
577+
}
578+
}
579+
580+
impl<T:Send> Send for RacyCell<T> { }
581+
582+
impl<T> Sync for RacyCell<T> { } // Oh dear

src/libcore/ptr.rs

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,7 @@ use clone::Clone;
9292
use intrinsics;
9393
use option::Option;
9494
use option::Option::{Some, None};
95+
use kinds::{Send, Sync};
9596

9697
use cmp::{PartialEq, Eq, Ord, PartialOrd, Equiv};
9798
use cmp::Ordering;
@@ -501,3 +502,35 @@ impl<T> PartialOrd for *mut T {
501502
#[inline]
502503
fn ge(&self, other: &*mut T) -> bool { *self >= *other }
503504
}
505+
506+
/// A wrapper around a raw `*mut T` that indicates that the possessor
507+
/// of this wrapper owns the referent. This in turn implies that the
508+
/// `OwnedPtr<T>` is `Send`/`Sync` if `T` is `Send`/`Sync`, unlike a
509+
/// raw `*mut T` (which conveys no particular ownership semantics).
510+
/// Useful for building abstractions like `Vec<T>` or `Box<T>`, which
511+
/// internally use raw pointers to manage the memory that they own.
512+
pub struct OwnedPtr<T>(pub *mut T);
513+
514+
/// `OwnedPtr` pointers are `Send` if `T` is `Send` because the data they
515+
/// reference is unaliased. Note that this aliasing invariant is
516+
/// unenforced by the type system; the abstraction using the
517+
/// `OwnedPtr` must enforce it.
518+
impl<T:Send> Send for OwnedPtr<T> { }
519+
520+
/// `OwnedPtr` pointers are `Sync` if `T` is `Sync` because the data they
521+
/// reference is unaliased. Note that this aliasing invariant is
522+
/// unenforced by the type system; the abstraction using the
523+
/// `OwnedPtr` must enforce it.
524+
impl<T:Sync> Sync for OwnedPtr<T> { }
525+
526+
impl<T> OwnedPtr<T> {
527+
/// Returns a null OwnedPtr.
528+
pub fn null() -> OwnedPtr<T> {
529+
OwnedPtr(RawPtr::null())
530+
}
531+
532+
/// Return an (unsafe) pointer into the memory owned by `self`.
533+
pub unsafe fn offset(self, offset: int) -> *mut T {
534+
(self.0 as *const T).offset(offset) as *mut T
535+
}
536+
}

0 commit comments

Comments (0)