Skip to content

Commit ff60152

Browse files
committed
Prevent globals from being optimized without relying on llvm.used
1 parent 5bf02df commit ff60152

File tree

3 files changed

+93
-119
lines changed

3 files changed

+93
-119
lines changed

compiler/rustc_codegen_llvm/src/builder/gpu_offload.rs

Lines changed: 28 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -170,7 +170,7 @@ impl KernelArgsTy {
170170
fn new<'ll>(
171171
cx: &'ll SimpleCx<'_>,
172172
num_args: u64,
173-
memtransfer_types: &[&'ll Value],
173+
memtransfer_types: &'ll Value,
174174
geps: [&'ll Value; 3],
175175
) -> [(Align, &'ll Value); 13] {
176176
let four = Align::from_bytes(4).expect("4 Byte alignment should work");
@@ -184,7 +184,7 @@ impl KernelArgsTy {
184184
(eight, geps[0]),
185185
(eight, geps[1]),
186186
(eight, geps[2]),
187-
(eight, memtransfer_types[0]),
187+
(eight, memtransfer_types),
188188
// The next two are debug infos. FIXME(offload): set them
189189
(eight, cx.const_null(cx.type_ptr())), // dbg
190190
(eight, cx.const_null(cx.type_ptr())), // dbg
@@ -265,7 +265,7 @@ pub(crate) fn gen_define_handling<'ll>(
265265
metadata: &[OffloadMetadata],
266266
types: &[&Type],
267267
symbol: &str,
268-
) -> (&'ll llvm::Value, &'ll llvm::Value) {
268+
) -> (&'ll llvm::Value, &'ll llvm::Value, &'ll llvm::Value, &'ll llvm::Value) {
269269
// It seems like non-pointer values are automatically mapped. So here, we focus on pointer (or
270270
// reference) types.
271271
let ptr_meta = types.iter().zip(metadata).filter_map(|(&x, meta)| match cx.type_kind(x) {
@@ -313,16 +313,15 @@ pub(crate) fn gen_define_handling<'ll>(
313313

314314
let initializer = crate::common::named_struct(offload_entry_ty, &elems);
315315
let c_name = CString::new(name).unwrap();
316-
let llglobal = llvm::add_global(cx.llmod, offload_entry_ty, &c_name);
317-
llvm::set_global_constant(llglobal, true);
318-
llvm::set_linkage(llglobal, WeakAnyLinkage);
319-
llvm::set_initializer(llglobal, initializer);
320-
llvm::set_alignment(llglobal, Align::EIGHT);
316+
let offload_entry = llvm::add_global(cx.llmod, offload_entry_ty, &c_name);
317+
llvm::set_global_constant(offload_entry, true);
318+
llvm::set_linkage(offload_entry, WeakAnyLinkage);
319+
llvm::set_initializer(offload_entry, initializer);
320+
llvm::set_alignment(offload_entry, Align::EIGHT);
321321
let c_section_name = CString::new("llvm_offload_entries").unwrap();
322-
llvm::set_section(llglobal, &c_section_name);
322+
llvm::set_section(offload_entry, &c_section_name);
323323

324-
add_to_llvm_used(cx, &[offload_sizes, memtransfer_types, region_id, llglobal]);
325-
(memtransfer_types, region_id)
324+
(offload_sizes, memtransfer_types, region_id, offload_entry)
326325
}
327326

328327
fn declare_offload_fn<'ll>(
@@ -363,8 +362,10 @@ fn declare_offload_fn<'ll>(
363362
pub(crate) fn gen_call_handling<'ll>(
364363
cx: &SimpleCx<'ll>,
365364
bb: &BasicBlock,
366-
memtransfer_types: &[&'ll llvm::Value],
367-
region_ids: &[&'ll llvm::Value],
365+
offload_sizes: &'ll llvm::Value,
366+
offload_entry: &'ll llvm::Value,
367+
memtransfer_types: &'ll llvm::Value,
368+
region_id: &'ll llvm::Value,
368369
args: &[&'ll Value],
369370
types: &[&Type],
370371
metadata: &[OffloadMetadata],
@@ -382,6 +383,18 @@ pub(crate) fn gen_call_handling<'ll>(
382383

383384
let mut builder = SBuilder::build(cx, bb);
384385

386+
for val in [offload_sizes, offload_entry] {
387+
unsafe {
388+
let dummy = llvm::LLVMBuildLoad2(
389+
&builder.llbuilder,
390+
llvm::LLVMTypeOf(val),
391+
val,
392+
b"dummy\0".as_ptr() as *const _,
393+
);
394+
llvm::LLVMSetVolatile(dummy, llvm::TRUE);
395+
}
396+
}
397+
385398
let num_args = types.len() as u64;
386399

387400
// Step 0)
@@ -479,7 +492,7 @@ pub(crate) fn gen_call_handling<'ll>(
479492

480493
// Step 2)
481494
let s_ident_t = generate_at_one(&cx);
482-
let o = memtransfer_types[0];
495+
let o = memtransfer_types;
483496
let geps = get_geps(&mut builder, &cx, ty, ty2, a1, a2, a4);
484497
generate_mapper_call(&mut builder, &cx, geps, o, begin_mapper_decl, fn_ty, num_args, s_ident_t);
485498
let values = KernelArgsTy::new(&cx, num_args, memtransfer_types, geps);
@@ -498,7 +511,7 @@ pub(crate) fn gen_call_handling<'ll>(
498511
// FIXME(offload): Don't hardcode the numbers of threads in the future.
499512
cx.get_const_i32(2097152),
500513
cx.get_const_i32(256),
501-
region_ids[0],
514+
region_id,
502515
a5,
503516
];
504517
builder.call(tgt_target_kernel_ty, tgt_decl, &args, None);
@@ -512,41 +525,3 @@ pub(crate) fn gen_call_handling<'ll>(
512525

513526
drop(builder);
514527
}
515-
516-
// TODO(Sa4dUs): check if there's a better way of doing this, also move to a proper location
517-
fn add_to_llvm_used<'ll>(cx: &'ll SimpleCx<'_>, globals: &[&'ll Value]) {
518-
let ptr_ty = cx.type_ptr();
519-
let arr_ty = cx.type_array(ptr_ty, globals.len() as u64);
520-
let arr_val = cx.const_array(ptr_ty, globals);
521-
522-
let name = CString::new("llvm.used").unwrap();
523-
524-
let used_global_opt = unsafe { llvm::LLVMGetNamedGlobal(cx.llmod, name.as_ptr()) };
525-
526-
if used_global_opt.is_none() {
527-
let new_global = unsafe { llvm::LLVMAddGlobal(cx.llmod, arr_ty, name.as_ptr()) };
528-
unsafe { llvm::LLVMSetLinkage(new_global, llvm::Linkage::AppendingLinkage) };
529-
unsafe {
530-
llvm::LLVMSetSection(new_global, CString::new("llvm.metadata").unwrap().as_ptr())
531-
};
532-
unsafe { llvm::LLVMSetInitializer(new_global, arr_val) };
533-
llvm::LLVMSetGlobalConstant(new_global, llvm::TRUE);
534-
return;
535-
}
536-
537-
let used_global = used_global_opt.expect("expected @llvm.used");
538-
let mut combined: Vec<&'ll Value> = Vec::new();
539-
540-
if let Some(existing_init) = llvm::LLVMGetInitializer(used_global) {
541-
let num_elems = unsafe { llvm::LLVMGetNumOperands(existing_init) };
542-
for i in 0..num_elems {
543-
if let Some(elem) = unsafe { llvm::LLVMGetOperand(existing_init, i) } {
544-
combined.push(elem);
545-
}
546-
}
547-
}
548-
549-
combined.extend_from_slice(globals);
550-
let new_arr = cx.const_array(ptr_ty, &combined);
551-
unsafe { llvm::LLVMSetInitializer(used_global, new_arr) };
552-
}

compiler/rustc_codegen_llvm/src/intrinsic.rs

Lines changed: 12 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1275,21 +1275,24 @@ fn codegen_offload<'ll, 'tcx>(
12751275
let types = inputs.iter().map(|ty| cx.layout_of(*ty).llvm_type(cx)).collect::<Vec<_>>();
12761276

12771277
// TODO(Sa4dUs): separate globals from call-independent headers and use typetrees to reserve the correct amount of memory
1278-
let (memtransfer_type, region_id) = crate::builder::gpu_offload::gen_define_handling(
1279-
cx,
1280-
offload_entry_ty,
1281-
&metadata,
1282-
&types,
1283-
&target_symbol,
1284-
);
1278+
let (offload_sizes, memtransfer_types, region_id, offload_entry) =
1279+
crate::builder::gpu_offload::gen_define_handling(
1280+
cx,
1281+
offload_entry_ty,
1282+
&metadata,
1283+
&types,
1284+
&target_symbol,
1285+
);
12851286

12861287
// TODO(Sa4dUs): this is just to avoid lifetime issues
12871288
let bb = unsafe { llvm::LLVMGetInsertBlock(bx.llbuilder) };
12881289
crate::builder::gpu_offload::gen_call_handling(
12891290
cx,
12901291
bb,
1291-
&[memtransfer_type],
1292-
&[region_id],
1292+
offload_sizes,
1293+
offload_entry,
1294+
memtransfer_types,
1295+
region_id,
12931296
&args,
12941297
&types,
12951298
&metadata,

tests/codegen-llvm/gpu_offload/gpu_host.rs

Lines changed: 53 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
#[unsafe(no_mangle)]
1818
fn main() {
1919
let mut x = [3.0; 256];
20-
kernel(&mut x);
20+
kernel_1(&mut x);
2121
core::hint::black_box(&x);
2222
}
2323

@@ -26,11 +26,11 @@ fn main() {
2626
// CHECK: %struct.__tgt_bin_desc = type { i32, ptr, ptr, ptr }
2727
// CHECK: %struct.__tgt_kernel_arguments = type { i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, i64, i64, [3 x i32], [3 x i32], i32 }
2828

29-
// CHECK: @.offload_sizes._kernel = private unnamed_addr constant [1 x i64] [i64 1024]
30-
// CHECK: @.offload_maptypes._kernel = private unnamed_addr constant [1 x i64] [i64 35]
31-
// CHECK: @._kernel.region_id = weak unnamed_addr constant i8 0
32-
// CHECK: @.offloading.entry_name._kernel = internal unnamed_addr constant [8 x i8] c"_kernel\00", section ".llvm.rodata.offloading", align 1
33-
// CHECK: @.offloading.entry._kernel = weak constant %struct.__tgt_offload_entry { i64 0, i16 1, i16 1, i32 0, ptr @._kernel.region_id, ptr @.offloading.entry_name._kernel, i64 0, i64 0, ptr null }, section "llvm_offload_entries", align 8
29+
// CHECK: @.offload_sizes._kernel_1 = private unnamed_addr constant [1 x i64] [i64 1024]
30+
// CHECK: @.offload_maptypes._kernel_1 = private unnamed_addr constant [1 x i64] [i64 35]
31+
// CHECK: @._kernel_1.region_id = weak unnamed_addr constant i8 0
32+
// CHECK: @.offloading.entry_name._kernel_1 = internal unnamed_addr constant [10 x i8] c"_kernel_1\00", section ".llvm.rodata.offloading", align 1
33+
// CHECK: @.offloading.entry._kernel_1 = weak constant %struct.__tgt_offload_entry { i64 0, i16 1, i16 1, i32 0, ptr @._kernel_1.region_id, ptr @.offloading.entry_name._kernel_1, i64 0, i64 0, ptr null }, section "llvm_offload_entries", align 8
3434

3535
// CHECK: @anon.{{.*}}.0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
3636
// CHECK: @anon.{{.*}}.1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 22, ptr @anon.{{.*}}.0 }, align 8
@@ -40,74 +40,70 @@ fn main() {
4040
// CHECK-NEXT: start:
4141
// CHECK-NEXT: %0 = alloca [8 x i8], align 8
4242
// CHECK-NEXT: %x = alloca [1024 x i8], align 16
43+
// CHECK: call void @kernel_1(ptr noalias noundef nonnull align 4 dereferenceable(1024) %x)
44+
// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %0)
45+
// CHECK-NEXT: store ptr %x, ptr %0, align 8
46+
// CHECK-NEXT: call void asm sideeffect "", "r,~{memory}"(ptr nonnull %0) #4, !srcloc !4
47+
// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %0)
48+
// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %x)
49+
// CHECK-NEXT: ret void
50+
// CHECK-NEXT: }
51+
52+
// CHECK: define{{( dso_local)?}} void @kernel_1(ptr noalias noundef align 4 dereferenceable(1024) %x)
53+
// CHECK-NEXT: start:
54+
// CHECK-NEXT: %dummy = load volatile ptr, ptr @.offload_sizes._kernel_1, align 8
55+
// CHECK-NEXT: %dummy1 = load volatile ptr, ptr @.offloading.entry._kernel_1, align 8
4356
// CHECK-NEXT: %EmptyDesc = alloca %struct.__tgt_bin_desc, align 8
4457
// CHECK-NEXT: %.offload_baseptrs = alloca [1 x ptr], align 8
4558
// CHECK-NEXT: %.offload_ptrs = alloca [1 x ptr], align 8
4659
// CHECK-NEXT: %.offload_sizes = alloca [1 x i64], align 8
4760
// CHECK-NEXT: %kernel_args = alloca %struct.__tgt_kernel_arguments, align 8
48-
// CHECK: call void @llvm.memset.p0.i64(ptr align 8 %EmptyDesc, i8 0, i64 32, i1 false)
49-
// CHECK-NEXT: %1 = getelementptr inbounds float, ptr %x, i32 0
50-
// CHECK-NEXT: call void @__tgt_register_lib(ptr %EmptyDesc)
61+
// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(32) %EmptyDesc, i8 0, i64 32, i1 false)
62+
// CHECK-NEXT: call void @__tgt_register_lib(ptr nonnull %EmptyDesc)
5163
// CHECK-NEXT: call void @__tgt_init_all_rtls()
52-
// CHECK-NEXT: %2 = getelementptr inbounds [1 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
53-
// CHECK-NEXT: store ptr %x, ptr %2, align 8
54-
// CHECK-NEXT: %3 = getelementptr inbounds [1 x ptr], ptr %.offload_ptrs, i32 0, i32 0
55-
// CHECK-NEXT: store ptr %1, ptr %3, align 8
56-
// CHECK-NEXT: %4 = getelementptr inbounds [1 x i64], ptr %.offload_sizes, i32 0, i32 0
57-
// CHECK-NEXT: store i64 1024, ptr %4, align 8
58-
// CHECK-NEXT: %5 = getelementptr inbounds [1 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
59-
// CHECK-NEXT: %6 = getelementptr inbounds [1 x ptr], ptr %.offload_ptrs, i32 0, i32 0
60-
// CHECK-NEXT: %7 = getelementptr inbounds [1 x i64], ptr %.offload_sizes, i32 0, i32 0
61-
// CHECK-NEXT: call void @__tgt_target_data_begin_mapper(ptr @1, i64 -1, i32 1, ptr %5, ptr %6, ptr %7, ptr @.offload_maptypes.1, ptr null, ptr null)
62-
// CHECK-NEXT: %8 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 0
63-
// CHECK-NEXT: store i32 3, ptr %8, align 4
64-
// CHECK-NEXT: %9 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 1
65-
// CHECK-NEXT: store i32 1, ptr %9, align 4
66-
// CHECK-NEXT: %10 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 2
67-
// CHECK-NEXT: store ptr %5, ptr %10, align 8
68-
// CHECK-NEXT: %11 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 3
69-
// CHECK-NEXT: store ptr %6, ptr %11, align 8
70-
// CHECK-NEXT: %12 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 4
71-
// CHECK-NEXT: store ptr %7, ptr %12, align 8
72-
// CHECK-NEXT: %13 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 5
73-
// CHECK-NEXT: store ptr @.offload_maptypes.1, ptr %13, align 8
74-
// CHECK-NEXT: %14 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 6
75-
// CHECK-NEXT: store ptr null, ptr %14, align 8
76-
// CHECK-NEXT: %15 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 7
77-
// CHECK-NEXT: store ptr null, ptr %15, align 8
78-
// CHECK-NEXT: %16 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 8
79-
// CHECK-NEXT: store i64 0, ptr %16, align 8
80-
// CHECK-NEXT: %17 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 9
81-
// CHECK-NEXT: store i64 0, ptr %17, align 8
82-
// CHECK-NEXT: %18 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 10
83-
// CHECK-NEXT: store [3 x i32] [i32 2097152, i32 0, i32 0], ptr %18, align 4
84-
// CHECK-NEXT: %19 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 11
85-
// CHECK-NEXT: store [3 x i32] [i32 256, i32 0, i32 0], ptr %19, align 4
86-
// CHECK-NEXT: %20 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 12
87-
// CHECK-NEXT: store i32 0, ptr %20, align 4
88-
// CHECK-NEXT: %21 = call i32 @__tgt_target_kernel(ptr @1, i64 -1, i32 2097152, i32 256, ptr @.kernel_1.region_id, ptr %kernel_args)
89-
// CHECK-NEXT: %22 = getelementptr inbounds [1 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
90-
// CHECK-NEXT: %23 = getelementptr inbounds [1 x ptr], ptr %.offload_ptrs, i32 0, i32 0
91-
// CHECK-NEXT: %24 = getelementptr inbounds [1 x i64], ptr %.offload_sizes, i32 0, i32 0
92-
// CHECK-NEXT: call void @__tgt_target_data_end_mapper(ptr @1, i64 -1, i32 1, ptr %22, ptr %23, ptr %24, ptr @.offload_maptypes.1, ptr null, ptr null)
93-
// CHECK-NEXT: call void @__tgt_unregister_lib(ptr %EmptyDesc)
94-
// CHECK: store ptr %x, ptr %0, align 8
95-
// CHECK-NEXT: call void asm sideeffect "", "r,~{memory}"(ptr nonnull %0)
96-
// CHECK: ret void
64+
// CHECK-NEXT: store ptr %x, ptr %.offload_baseptrs, align 8
65+
// CHECK-NEXT: store ptr %x, ptr %.offload_ptrs, align 8
66+
// CHECK-NEXT: store i64 1024, ptr %.offload_sizes, align 8
67+
// CHECK-NEXT: call void @__tgt_target_data_begin_mapper(ptr nonnull @anon.{{.*}}.1, i64 -1, i32 1, ptr nonnull %.offload_baseptrs, ptr nonnull %.offload_ptrs, ptr nonnull %.offload_sizes, ptr nonnull @.offload_maptypes._kernel_1, ptr null, ptr null)
68+
// CHECK-NEXT: store i32 3, ptr %kernel_args, align 8
69+
// CHECK-NEXT: %0 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 4
70+
// CHECK-NEXT: store i32 1, ptr %0, align 4
71+
// CHECK-NEXT: %1 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 8
72+
// CHECK-NEXT: store ptr %.offload_baseptrs, ptr %1, align 8
73+
// CHECK-NEXT: %2 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 16
74+
// CHECK-NEXT: store ptr %.offload_ptrs, ptr %2, align 8
75+
// CHECK-NEXT: %3 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 24
76+
// CHECK-NEXT: store ptr %.offload_sizes, ptr %3, align 8
77+
// CHECK-NEXT: %4 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 32
78+
// CHECK-NEXT: store ptr @.offload_maptypes._kernel_1, ptr %4, align 8
79+
// CHECK-NEXT: %5 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 40
80+
// CHECK-NEXT: %6 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 72
81+
// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(32) %5, i8 0, i64 32, i1 false)
82+
// CHECK-NEXT: store <4 x i32> <i32 2097152, i32 0, i32 0, i32 256>, ptr %6, align 8
83+
// CHECK-NEXT: %.fca.1.gep2 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 88
84+
// CHECK-NEXT: store i32 0, ptr %.fca.1.gep2, align 8
85+
// CHECK-NEXT: %.fca.2.gep3 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 92
86+
// CHECK-NEXT: store i32 0, ptr %.fca.2.gep3, align 4
87+
// CHECK-NEXT: %7 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 96
88+
// CHECK-NEXT: store i32 0, ptr %7, align 8
89+
// CHECK-NEXT: %8 = call i32 @__tgt_target_kernel(ptr nonnull @anon.{{.*}}.1, i64 -1, i32 2097152, i32 256, ptr nonnull @._kernel_1.region_id, ptr nonnull %kernel_args)
90+
// CHECK-NEXT: call void @__tgt_target_data_end_mapper(ptr nonnull @anon.{{.*}}.1, i64 -1, i32 1, ptr nonnull %.offload_baseptrs, ptr nonnull %.offload_ptrs, ptr nonnull %.offload_sizes, ptr nonnull @.offload_maptypes._kernel_1, ptr null, ptr null)
91+
// CHECK-NEXT: call void @__tgt_unregister_lib(ptr nonnull %EmptyDesc)
92+
// CHECK-NEXT: ret void
9793
// CHECK-NEXT: }
9894

9995
// CHECK: Function Attrs: nounwind
10096
// CHECK: declare i32 @__tgt_target_kernel(ptr, i64, i32, i32, ptr, ptr)
10197

10298
#[unsafe(no_mangle)]
10399
#[inline(never)]
104-
pub fn kernel(x: &mut [f32; 256]) {
105-
core::intrinsics::offload(_kernel, (x,))
100+
pub fn kernel_1(x: &mut [f32; 256]) {
101+
core::intrinsics::offload(_kernel_1, (x,))
106102
}
107103

108104
#[unsafe(no_mangle)]
109105
#[inline(never)]
110-
pub fn _kernel(x: &mut [f32; 256]) {
106+
pub fn _kernel_1(x: &mut [f32; 256]) {
111107
for i in 0..256 {
112108
x[i] = 21.0;
113109
}

0 commit comments

Comments
 (0)