Commit 614856d (1 parent: 05bdf4e)
csrc/cache.h
@@ -1,3 +1,5 @@
+#pragma once
+
 #include <torch/extension.h>
 
 #include <map>
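
The hunk above only adds #pragma once, which asks the compiler to include the header at most once per translation unit and so prevents redefinition errors when csrc/cache.h is reached through several include paths. The sketch below shows the equivalent effect with a traditional include guard; the guard macro name CSRC_CACHE_H_ and the elided declarations are illustrative, not taken from the actual file.

// Illustrative only: a classic include guard achieving what #pragma once does.
#ifndef CSRC_CACHE_H_
#define CSRC_CACHE_H_

#include <torch/extension.h>

#include <map>

// ... cache-related declarations elided ...

#endif  // CSRC_CACHE_H_

In practice #pragma once is shorter and cannot collide with another header's guard macro, which is presumably why it was chosen here.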
csrc/cuda_utils.h
int get_device_attribute(
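
The parameter list of get_device_attribute is cut off in this view. For illustration only, a helper of this kind can be built directly on the CUDA runtime API as sketched below; the parameter names attribute and device_id, and the body, are assumptions rather than the commit's actual code.

// Hypothetical sketch: query a CUDA device attribute via the runtime API.
// The signature is assumed; the diff shows only a truncated declaration.
#include <cuda_runtime.h>

int get_device_attribute(int attribute, int device_id) {
  int value = 0;
  // cudaDeviceGetAttribute fills `value` with the requested attribute
  // (e.g. cudaDevAttrMaxSharedMemoryPerBlockOptin) for the given device.
  cudaDeviceGetAttribute(&value, static_cast<cudaDeviceAttr>(attribute),
                         device_id);
  return value;
}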
csrc/dispatch_utils.h
@@ -2,6 +2,8 @@
  * Adapted from
  * https://github.com/pytorch/pytorch/blob/v2.0.1/aten/src/ATen/Dispatch.h
  */
+#pragma once
+
 
 #define VLLM_DISPATCH_CASE_FLOATING_TYPES(...)               \
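
The VLLM_DISPATCH_CASE_FLOATING_TYPES definition is truncated here. As background, macros of this kind are typically built on ATen's AT_DISPATCH_CASE and AT_DISPATCH_SWITCH helpers from the Dispatch.h file referenced above: each case instantiates a lambda body for one scalar type, and the switch picks the case matching a tensor's runtime dtype. The sketch below is an assumption about that general pattern, not the literal contents of csrc/dispatch_utils.h, and the usage example's kernel and tensor names are hypothetical.

// Assumed pattern, following ATen's Dispatch.h: expand the lambda body once
// per supported floating-point scalar type.
#define VLLM_DISPATCH_CASE_FLOATING_TYPES(...)               \
  AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__)       \
  AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__)        \
  AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__)

// Assumed companion macro: dispatch on a tensor's runtime dtype.
#define VLLM_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...)        \
  AT_DISPATCH_SWITCH(TYPE, NAME,                             \
                     VLLM_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))

// Usage example: inside the lambda, scalar_t is bound to the selected C++ type.
// VLLM_DISPATCH_FLOATING_TYPES(out.scalar_type(), "paged_attention_v1", [&] {
//   launch_kernel<scalar_t>(out, query, key_cache, value_cache);
// });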
csrc/ops.h
void paged_attention_v1(