 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/prefetch.h>
+#include <linux/io.h>
+#include <asm/mshyperv.h>
 
 #include "hyperv_vmbus.h"
 
@@ -183,32 +185,61 @@ void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
 int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 		       struct page *pages, u32 page_cnt, u32 max_pkt_size)
 {
-	int i;
 	struct page **pages_wraparound;
+	unsigned long *pfns_wraparound;
+	u64 pfn;
+	int i;
 
 	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
 
 	/*
 	 * First page holds struct hv_ring_buffer, do wraparound mapping for
 	 * the rest.
 	 */
-	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
-				   GFP_KERNEL);
-	if (!pages_wraparound)
-		return -ENOMEM;
+	if (hv_isolation_type_snp()) {
+		pfn = page_to_pfn(pages) +
+			PFN_DOWN(ms_hyperv.shared_gpa_boundary);
+
+		pfns_wraparound = kcalloc(page_cnt * 2 - 1,
+					  sizeof(unsigned long), GFP_KERNEL);
+		if (!pfns_wraparound)
+			return -ENOMEM;
+
+		pfns_wraparound[0] = pfn;
+		for (i = 0; i < 2 * (page_cnt - 1); i++)
+			pfns_wraparound[i + 1] = pfn + i % (page_cnt - 1) + 1;
 
-	pages_wraparound[0] = pages;
-	for (i = 0; i < 2 * (page_cnt - 1); i++)
-		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
+		ring_info->ring_buffer = (struct hv_ring_buffer *)
+			vmap_pfn(pfns_wraparound, page_cnt * 2 - 1,
+				 PAGE_KERNEL);
+		kfree(pfns_wraparound);
 
-	ring_info->ring_buffer = (struct hv_ring_buffer *)
-		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);
+		if (!ring_info->ring_buffer)
+			return -ENOMEM;
+
+		/* Zero ring buffer after setting memory host visibility. */
+		memset(ring_info->ring_buffer, 0x00, PAGE_SIZE * page_cnt);
+	} else {
+		pages_wraparound = kcalloc(page_cnt * 2 - 1,
+					   sizeof(struct page *),
+					   GFP_KERNEL);
+		if (!pages_wraparound)
+			return -ENOMEM;
+
+		pages_wraparound[0] = pages;
+		for (i = 0; i < 2 * (page_cnt - 1); i++)
+			pages_wraparound[i + 1] =
+				&pages[i % (page_cnt - 1) + 1];
 
-	kfree(pages_wraparound);
+		ring_info->ring_buffer = (struct hv_ring_buffer *)
+			vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
+			     PAGE_KERNEL);
 
+		kfree(pages_wraparound);
+		if (!ring_info->ring_buffer)
+			return -ENOMEM;
+	}
 
-	if (!ring_info->ring_buffer)
-		return -ENOMEM;
 
 	ring_info->ring_buffer->read_index =
 		ring_info->ring_buffer->write_index = 0;
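A note on what the wraparound arrays above are doing: page 0 of the ring holds struct hv_ring_buffer, and the remaining page_cnt - 1 data pages are listed twice in a row in the array handed to vmap()/vmap_pfn(), so a packet that wraps past the end of the ring stays virtually contiguous and the copy helpers can use a single memcpy() across the wrap point instead of splitting every copy in two. In the SNP isolation branch the PFNs are additionally offset by PFN_DOWN(ms_hyperv.shared_gpa_boundary) so the mapping targets the host-visible alias of the ring pages; those alias frames are not backed by struct page, which is why that branch uses vmap_pfn() rather than vmap(). The snippet below is a minimal stand-alone sketch (user-space C with made-up values for base_pfn and page_cnt, not part of the kernel patch) of just the wraparound index arithmetic:

/*
 * Illustration only: build the same wraparound PFN list as the SNP branch
 * above, using arbitrary example values instead of real ring-buffer pages.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long base_pfn = 0x1000;        /* hypothetical first PFN of the ring */
	unsigned int page_cnt = 4;              /* header page + 3 data pages */
	unsigned int total = page_cnt * 2 - 1;  /* header once, data pages twice */
	unsigned long *pfns;
	unsigned int i;

	pfns = calloc(total, sizeof(*pfns));
	if (!pfns)
		return 1;

	pfns[0] = base_pfn;                     /* struct hv_ring_buffer page, mapped once */
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pfns[i + 1] = base_pfn + i % (page_cnt - 1) + 1;

	for (i = 0; i < total; i++)
		printf("slot %u -> pfn 0x%lx\n", i, pfns[i]);
	/* Output: 0x1000, 0x1001, 0x1002, 0x1003, 0x1001, 0x1002, 0x1003 */

	free(pfns);
	return 0;
}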