
Commit 15a26c2

Merge branch 'for-6.15/features' into fwctl
Add CXL mailbox Features commands enabling. This is also preparation for
CXL fwctl enabling. The same code will also be utilized by the CXL EDAC
enabling. The commands 'Get Supported Features', 'Get Feature', and
'Set Feature' are enabled for kernel usage, as required for the CXL fwctl
driver.

* branch 'for-6.15/features':
  cxl: Setup exclusive CXL features that are reserved for the kernel
  cxl/mbox: Add SET_FEATURE mailbox command
  cxl/mbox: Add GET_FEATURE mailbox command
  cxl/test: Add Get Supported Features mailbox command support
  cxl: Add Get Supported Features command for kernel usage
  cxl: Enumerate feature commands
  cxl: Refactor user ioctl command path from mds to mailbox

Signed-off-by: Jason Gunthorpe <[email protected]>
2 parents a1ded2c + a8b773f commit 15a26c2
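As a rough orientation for what this merge enables, the sketch below shows how an in-kernel consumer (such as the planned fwctl or EDAC code) might walk the features enumerated by this series. Only to_cxlfs() and the cxl_feat_entries layout come from the patches themselves; the function name and uuid argument are hypothetical.

/* Hypothetical consumer sketch; example_find_feature() is not part of this merge. */
static struct cxl_feat_entry *
example_find_feature(struct cxl_dev_state *cxlds, const uuid_t *uuid)
{
        struct cxl_features_state *cxlfs = to_cxlfs(cxlds);

        if (!cxlfs || !cxlfs->entries)
                return NULL;

        /* Entries were cached by 'Get Supported Features' at setup time */
        for (int i = 0; i < cxlfs->entries->num_features; i++) {
                if (uuid_equal(&cxlfs->entries->ent[i].uuid, uuid))
                        return &cxlfs->entries->ent[i];
        }

        return NULL;
}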

12 files changed: 772 additions, 97 deletions


drivers/cxl/Kconfig

Lines changed: 11 additions & 0 deletions
@@ -102,6 +102,17 @@ config CXL_MEM
 
           If unsure say 'm'.
 
+config CXL_FEATURES
+        bool "CXL: Features"
+        depends on CXL_PCI
+        help
+          Enable support for CXL Features. A CXL device that includes a mailbox
+          supports commands that allow listing, getting, and setting of
+          optionally defined features such as memory sparing or post package
+          sparing. Vendors may define custom features for the device.
+
+          If unsure say 'n'.
+
 config CXL_PORT
         default CXL_BUS
         tristate
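For reference, a hedged .config fragment that would turn the new option on; treating the CXL bus and PCI drivers as built-in here is only an assumption, they can equally be =m:

# Sketch of a kernel .config fragment (assumes the CXL bus/PCI drivers are already selected)
CONFIG_CXL_BUS=y
CONFIG_CXL_PCI=y
CONFIG_CXL_FEATURES=y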

drivers/cxl/core/Makefile

Lines changed: 1 addition & 0 deletions
@@ -16,3 +16,4 @@ cxl_core-y += pmu.o
 cxl_core-y += cdat.o
 cxl_core-$(CONFIG_TRACING) += trace.o
 cxl_core-$(CONFIG_CXL_REGION) += region.o
+cxl_core-$(CONFIG_CXL_FEATURES) += features.o

drivers/cxl/core/core.h

Lines changed: 15 additions & 2 deletions
@@ -4,6 +4,8 @@
 #ifndef __CXL_CORE_H__
 #define __CXL_CORE_H__
 
+#include <cxl/mailbox.h>
+
 extern const struct device_type cxl_nvdimm_bridge_type;
 extern const struct device_type cxl_nvdimm_type;
 extern const struct device_type cxl_pmu_type;
@@ -65,9 +67,9 @@ static inline void cxl_region_exit(void)
 
 struct cxl_send_command;
 struct cxl_mem_query_commands;
-int cxl_query_cmd(struct cxl_memdev *cxlmd,
+int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
                   struct cxl_mem_query_commands __user *q);
-int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s);
+int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s);
 void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
                                    resource_size_t length);
 
@@ -115,4 +117,15 @@ bool cxl_need_node_perf_attrs_update(int nid);
 int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
                                         struct access_coordinate *c);
 
+#ifdef CONFIG_CXL_FEATURES
+size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
+                       enum cxl_get_feat_selection selection,
+                       void *feat_out, size_t feat_out_size, u16 offset,
+                       u16 *return_code);
+int cxl_set_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
+                    u8 feat_version, const void *feat_data,
+                    size_t feat_data_size, u32 feat_flag, u16 offset,
+                    u16 *return_code);
+#endif
+
 #endif /* __CXL_CORE_H__ */
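To make the new kernel-internal prototypes concrete, here is a minimal, hypothetical round-trip through them. The selection enumerator, buffer size, and feature version are assumptions based on the rest of this series, and the transfer flags are left at zero because cxl_set_feature() manages the data-transfer bits itself:

/* Hypothetical caller; buffer size, version, and UUID handling are illustrative only. */
static int example_feature_roundtrip(struct cxl_mailbox *cxl_mbox, const uuid_t *uuid)
{
        u8 buf[32];
        u16 return_code;
        size_t got;

        /* Read the current value of the feature, starting at offset 0 */
        got = cxl_get_feature(cxl_mbox, uuid, CXL_GET_FEAT_SEL_CURRENT_VALUE,
                              buf, sizeof(buf), 0, &return_code);
        if (!got)
                return -ENXIO;

        /* Write the same bytes back as feature version 1 */
        return cxl_set_feature(cxl_mbox, uuid, 1, buf, got, 0, 0, &return_code);
}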

drivers/cxl/core/features.c

Lines changed: 333 additions & 0 deletions
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
+#include <linux/device.h>
+#include <cxl/mailbox.h>
+#include <cxl/features.h>
+#include "cxl.h"
+#include "core.h"
+#include "cxlmem.h"
+
+/* All the features below are exclusive to the kernel */
+static const uuid_t cxl_exclusive_feats[] = {
+        CXL_FEAT_PATROL_SCRUB_UUID,
+        CXL_FEAT_ECS_UUID,
+        CXL_FEAT_SPPR_UUID,
+        CXL_FEAT_HPPR_UUID,
+        CXL_FEAT_CACHELINE_SPARING_UUID,
+        CXL_FEAT_ROW_SPARING_UUID,
+        CXL_FEAT_BANK_SPARING_UUID,
+        CXL_FEAT_RANK_SPARING_UUID,
+};
+
+static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
+{
+        for (int i = 0; i < ARRAY_SIZE(cxl_exclusive_feats); i++) {
+                if (uuid_equal(&entry->uuid, &cxl_exclusive_feats[i]))
+                        return true;
+        }
+
+        return false;
+}
+
+inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
+{
+        return cxlds->cxlfs;
+}
+EXPORT_SYMBOL_NS_GPL(to_cxlfs, "CXL");
+
+static int cxl_get_supported_features_count(struct cxl_mailbox *cxl_mbox)
+{
+        struct cxl_mbox_get_sup_feats_out mbox_out;
+        struct cxl_mbox_get_sup_feats_in mbox_in;
+        struct cxl_mbox_cmd mbox_cmd;
+        int rc;
+
+        memset(&mbox_in, 0, sizeof(mbox_in));
+        mbox_in.count = cpu_to_le32(sizeof(mbox_out));
+        memset(&mbox_out, 0, sizeof(mbox_out));
+        mbox_cmd = (struct cxl_mbox_cmd) {
+                .opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
+                .size_in = sizeof(mbox_in),
+                .payload_in = &mbox_in,
+                .size_out = sizeof(mbox_out),
+                .payload_out = &mbox_out,
+                .min_out = sizeof(mbox_out),
+        };
+        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+        if (rc < 0)
+                return rc;
+
+        return le16_to_cpu(mbox_out.supported_feats);
+}
+
+static struct cxl_feat_entries *
+get_supported_features(struct cxl_features_state *cxlfs)
+{
+        int remain_feats, max_size, max_feats, start, rc, hdr_size;
+        struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
+        int feat_size = sizeof(struct cxl_feat_entry);
+        struct cxl_mbox_get_sup_feats_in mbox_in;
+        struct cxl_feat_entry *entry;
+        struct cxl_mbox_cmd mbox_cmd;
+        int user_feats = 0;
+        int count;
+
+        count = cxl_get_supported_features_count(cxl_mbox);
+        if (count <= 0)
+                return NULL;
+
+        struct cxl_feat_entries *entries __free(kvfree) =
+                kvmalloc(struct_size(entries, ent, count), GFP_KERNEL);
+        if (!entries)
+                return NULL;
+
+        struct cxl_mbox_get_sup_feats_out *mbox_out __free(kvfree) =
+                kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
+        if (!mbox_out)
+                return NULL;
+
+        hdr_size = struct_size(mbox_out, ents, 0);
+        max_size = cxl_mbox->payload_size - hdr_size;
+        /* max feat entries that can fit in mailbox max payload size */
+        max_feats = max_size / feat_size;
+        entry = entries->ent;
+
+        start = 0;
+        remain_feats = count;
+        do {
+                int retrieved, alloc_size, copy_feats;
+                int num_entries;
+
+                if (remain_feats > max_feats) {
+                        alloc_size = struct_size(mbox_out, ents, max_feats);
+                        remain_feats = remain_feats - max_feats;
+                        copy_feats = max_feats;
+                } else {
+                        alloc_size = struct_size(mbox_out, ents, remain_feats);
+                        copy_feats = remain_feats;
+                        remain_feats = 0;
+                }
+
+                memset(&mbox_in, 0, sizeof(mbox_in));
+                mbox_in.count = cpu_to_le32(alloc_size);
+                mbox_in.start_idx = cpu_to_le16(start);
+                memset(mbox_out, 0, alloc_size);
+                mbox_cmd = (struct cxl_mbox_cmd) {
+                        .opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
+                        .size_in = sizeof(mbox_in),
+                        .payload_in = &mbox_in,
+                        .size_out = alloc_size,
+                        .payload_out = mbox_out,
+                        .min_out = hdr_size,
+                };
+                rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+                if (rc < 0)
+                        return NULL;
+
+                if (mbox_cmd.size_out <= hdr_size)
+                        return NULL;
+
+                /*
+                 * Make sure retrieved out buffer is multiple of feature
+                 * entries.
+                 */
+                retrieved = mbox_cmd.size_out - hdr_size;
+                if (retrieved % feat_size)
+                        return NULL;
+
+                num_entries = le16_to_cpu(mbox_out->num_entries);
+                /*
+                 * If the reported output entries * defined entry size !=
+                 * retrieved output bytes, then the output package is incorrect.
+                 */
+                if (num_entries * feat_size != retrieved)
+                        return NULL;
+
+                memcpy(entry, mbox_out->ents, retrieved);
+                for (int i = 0; i < num_entries; i++) {
+                        if (!is_cxl_feature_exclusive(entry + i))
+                                user_feats++;
+                }
+                entry += num_entries;
+                /*
+                 * If the number of output entries is less than expected, add the
+                 * remaining entries to the next batch.
+                 */
+                remain_feats += copy_feats - num_entries;
+                start += num_entries;
+        } while (remain_feats);
+
+        entries->num_features = count;
+        entries->num_user_features = user_feats;
+
+        return no_free_ptr(entries);
+}
+
+static void free_cxlfs(void *_cxlfs)
+{
+        struct cxl_features_state *cxlfs = _cxlfs;
+        struct cxl_dev_state *cxlds = cxlfs->cxlds;
+
+        cxlds->cxlfs = NULL;
+        kvfree(cxlfs->entries);
+        kfree(cxlfs);
+}
+
+/**
+ * devm_cxl_setup_features() - Allocate and initialize features context
+ * @cxlds: CXL device context
+ *
+ * Return 0 on success or -errno on failure.
+ */
+int devm_cxl_setup_features(struct cxl_dev_state *cxlds)
+{
+        struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
+
+        if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
+                return -ENODEV;
+
+        struct cxl_features_state *cxlfs __free(kfree) =
+                kzalloc(sizeof(*cxlfs), GFP_KERNEL);
+        if (!cxlfs)
+                return -ENOMEM;
+
+        cxlfs->cxlds = cxlds;
+
+        cxlfs->entries = get_supported_features(cxlfs);
+        if (!cxlfs->entries)
+                return -ENOMEM;
+
+        cxlds->cxlfs = cxlfs;
+
+        return devm_add_action_or_reset(cxlds->dev, free_cxlfs, no_free_ptr(cxlfs));
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_features, "CXL");
+
+size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
+                       enum cxl_get_feat_selection selection,
+                       void *feat_out, size_t feat_out_size, u16 offset,
+                       u16 *return_code)
+{
+        size_t data_to_rd_size, size_out;
+        struct cxl_mbox_get_feat_in pi;
+        struct cxl_mbox_cmd mbox_cmd;
+        size_t data_rcvd_size = 0;
+        int rc;
+
+        if (return_code)
+                *return_code = CXL_MBOX_CMD_RC_INPUT;
+
+        if (!feat_out || !feat_out_size)
+                return 0;
+
+        size_out = min(feat_out_size, cxl_mbox->payload_size);
+        uuid_copy(&pi.uuid, feat_uuid);
+        pi.selection = selection;
+        do {
+                data_to_rd_size = min(feat_out_size - data_rcvd_size,
+                                      cxl_mbox->payload_size);
+                pi.offset = cpu_to_le16(offset + data_rcvd_size);
+                pi.count = cpu_to_le16(data_to_rd_size);
+
+                mbox_cmd = (struct cxl_mbox_cmd) {
+                        .opcode = CXL_MBOX_OP_GET_FEATURE,
+                        .size_in = sizeof(pi),
+                        .payload_in = &pi,
+                        .size_out = size_out,
+                        .payload_out = feat_out + data_rcvd_size,
+                        .min_out = data_to_rd_size,
+                };
+                rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+                if (rc < 0 || !mbox_cmd.size_out) {
+                        if (return_code)
+                                *return_code = mbox_cmd.return_code;
+                        return 0;
+                }
+                data_rcvd_size += mbox_cmd.size_out;
+        } while (data_rcvd_size < feat_out_size);
+
+        if (return_code)
+                *return_code = CXL_MBOX_CMD_RC_SUCCESS;
+
+        return data_rcvd_size;
+}
+
+/*
+ * FEAT_DATA_MIN_PAYLOAD_SIZE - min extra number of bytes should be
+ * available in the mailbox for storing the actual feature data so that
+ * the feature data transfer would work as expected.
+ */
+#define FEAT_DATA_MIN_PAYLOAD_SIZE 10
+int cxl_set_feature(struct cxl_mailbox *cxl_mbox,
+                    const uuid_t *feat_uuid, u8 feat_version,
+                    const void *feat_data, size_t feat_data_size,
+                    u32 feat_flag, u16 offset, u16 *return_code)
+{
+        size_t data_in_size, data_sent_size = 0;
+        struct cxl_mbox_cmd mbox_cmd;
+        size_t hdr_size;
+
+        if (return_code)
+                *return_code = CXL_MBOX_CMD_RC_INPUT;
+
+        struct cxl_mbox_set_feat_in *pi __free(kfree) =
+                kzalloc(cxl_mbox->payload_size, GFP_KERNEL);
+        if (!pi)
+                return -ENOMEM;
+
+        uuid_copy(&pi->uuid, feat_uuid);
+        pi->version = feat_version;
+        feat_flag &= ~CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK;
+        feat_flag |= CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET;
+        hdr_size = sizeof(pi->hdr);
+        /*
+         * Check minimum mbox payload size is available for
+         * the feature data transfer.
+         */
+        if (hdr_size + FEAT_DATA_MIN_PAYLOAD_SIZE > cxl_mbox->payload_size)
+                return -ENOMEM;
+
+        if (hdr_size + feat_data_size <= cxl_mbox->payload_size) {
+                pi->flags = cpu_to_le32(feat_flag |
+                                        CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER);
+                data_in_size = feat_data_size;
+        } else {
+                pi->flags = cpu_to_le32(feat_flag |
+                                        CXL_SET_FEAT_FLAG_INITIATE_DATA_TRANSFER);
+                data_in_size = cxl_mbox->payload_size - hdr_size;
+        }
+
+        do {
+                int rc;
+
+                pi->offset = cpu_to_le16(offset + data_sent_size);
+                memcpy(pi->feat_data, feat_data + data_sent_size, data_in_size);
+                mbox_cmd = (struct cxl_mbox_cmd) {
+                        .opcode = CXL_MBOX_OP_SET_FEATURE,
+                        .size_in = hdr_size + data_in_size,
+                        .payload_in = pi,
+                };
+                rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+                if (rc < 0) {
+                        if (return_code)
+                                *return_code = mbox_cmd.return_code;
+                        return rc;
+                }
+
+                data_sent_size += data_in_size;
+                if (data_sent_size >= feat_data_size) {
+                        if (return_code)
+                                *return_code = CXL_MBOX_CMD_RC_SUCCESS;
+                        return 0;
+                }
+
+                if ((feat_data_size - data_sent_size) <= (cxl_mbox->payload_size - hdr_size)) {
+                        data_in_size = feat_data_size - data_sent_size;
+                        pi->flags = cpu_to_le32(feat_flag |
+                                                CXL_SET_FEAT_FLAG_FINISH_DATA_TRANSFER);
+                } else {
+                        pi->flags = cpu_to_le32(feat_flag |
+                                                CXL_SET_FEAT_FLAG_CONTINUE_DATA_TRANSFER);
+                }
+        } while (true);
+}
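For context, a hedged sketch of a probe-time hookup, loosely modeled on how a memdev driver would call the new setup helper. Only devm_cxl_setup_features() and to_cxlfs() come from this file; the wrapper name, the decision to ignore the setup error, and the usual cxlmem.h/linux/device.h includes are assumptions. Teardown needs no driver code since free_cxlfs() is registered as a devm action above.

/* Hypothetical probe-path glue; not part of this commit. */
static int example_probe_features(struct cxl_dev_state *cxlds)
{
        struct cxl_features_state *cxlfs;
        int rc;

        rc = devm_cxl_setup_features(cxlds);
        if (rc)
                dev_dbg(cxlds->dev, "No CXL Features context: %d\n", rc);

        cxlfs = to_cxlfs(cxlds);
        if (cxlfs)
                dev_dbg(cxlds->dev, "%d features enumerated, %d user-visible\n",
                        cxlfs->entries->num_features,
                        cxlfs->entries->num_user_features);

        return 0;
}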
