@@ -2816,26 +2816,21 @@ u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
 
 static struct lock_class_key cxl_pmem_region_key;
 
-static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
+static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
 {
 	struct cxl_region_params *p = &cxlr->params;
 	struct cxl_nvdimm_bridge *cxl_nvb;
-	struct cxl_pmem_region *cxlr_pmem;
 	struct device *dev;
 	int i;
 
-	down_read(&cxl_region_rwsem);
-	if (p->state != CXL_CONFIG_COMMIT) {
-		cxlr_pmem = ERR_PTR(-ENXIO);
-		goto out;
-	}
+	guard(rwsem_read)(&cxl_region_rwsem);
+	if (p->state != CXL_CONFIG_COMMIT)
+		return -ENXIO;
 
-	cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
-			    GFP_KERNEL);
-	if (!cxlr_pmem) {
-		cxlr_pmem = ERR_PTR(-ENOMEM);
-		goto out;
-	}
+	struct cxl_pmem_region *cxlr_pmem __free(kfree) =
+		kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL);
+	if (!cxlr_pmem)
+		return -ENOMEM;
 
 	cxlr_pmem->hpa_range.start = p->res->start;
 	cxlr_pmem->hpa_range.end = p->res->end;
@@ -2853,11 +2848,8 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
 		 */
 		if (i == 0) {
 			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
-			if (!cxl_nvb) {
-				kfree(cxlr_pmem);
-				cxlr_pmem = ERR_PTR(-ENODEV);
-				goto out;
-			}
+			if (!cxl_nvb)
+				return -ENODEV;
 			cxlr->cxl_nvb = cxl_nvb;
 		}
 		m->cxlmd = cxlmd;
@@ -2868,18 +2860,16 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
 	}
 
 	dev = &cxlr_pmem->dev;
-	cxlr_pmem->cxlr = cxlr;
-	cxlr->cxlr_pmem = cxlr_pmem;
 	device_initialize(dev);
 	lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
 	device_set_pm_not_required(dev);
 	dev->parent = &cxlr->dev;
 	dev->bus = &cxl_bus_type;
 	dev->type = &cxl_pmem_region_type;
-out:
-	up_read(&cxl_region_rwsem);
+	cxlr_pmem->cxlr = cxlr;
+	cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem);
 
-	return cxlr_pmem;
+	return 0;
 }
 
 static void cxl_dax_region_release(struct device *dev)
@@ -2996,9 +2986,10 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
 	struct device *dev;
 	int rc;
 
-	cxlr_pmem = cxl_pmem_region_alloc(cxlr);
-	if (IS_ERR(cxlr_pmem))
-		return PTR_ERR(cxlr_pmem);
+	rc = cxl_pmem_region_alloc(cxlr);
+	if (rc)
+		return rc;
+	cxlr_pmem = cxlr->cxlr_pmem;
 	cxl_nvb = cxlr->cxl_nvb;
 
 	dev = &cxlr_pmem->dev;
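The conversion above relies on two scope-based cleanup idioms from the kernel's <linux/cleanup.h>: guard(rwsem_read)() drops the read lock on every exit from the function, and __free(kfree) together with no_free_ptr() frees the allocation on any early return unless ownership is explicitly handed off. As a rough illustration only, the sketch below shows the same shape in plain C using POSIX/GCC primitives rather than the kernel API; the names toy_region, toy_region_alloc, __autofree and scoped_read_lock are made up for this example.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

/* Auto-free: the compiler calls this with a pointer to the annotated
 * variable whenever it leaves scope, including on early returns. */
static void autofree(void *p)
{
	free(*(void **)p);
}
#define __autofree __attribute__((cleanup(autofree)))

/* Scoped read lock, loosely analogous to guard(rwsem_read)(&sem):
 * the unlock runs automatically when the guard variable leaves scope. */
static void unlock_read(pthread_rwlock_t **lockp)
{
	pthread_rwlock_unlock(*lockp);
}
#define scoped_read_lock(name, lock) \
	pthread_rwlock_t *name __attribute__((cleanup(unlock_read))) = (lock); \
	pthread_rwlock_rdlock(name)

struct toy_region {
	pthread_rwlock_t lock;
	int committed;
	char *pmem;		/* stands in for cxlr->cxlr_pmem */
};

static int toy_region_alloc(struct toy_region *r, size_t len)
{
	scoped_read_lock(hold, &r->lock);	/* released on every return */

	if (!r->committed)
		return -ENXIO;			/* no goto/unlock needed */

	char *buf __autofree = calloc(1, len);
	if (!buf)
		return -ENOMEM;			/* nothing allocated to leak */

	/* Success: disarm the auto-free by clearing the local, the way
	 * no_free_ptr() hands the allocation over to its new owner. */
	r->pmem = buf;
	buf = NULL;
	return 0;
}

Note that, matching the diff, the ownership handoff happens only after every step that can fail, so no error path can leave the caller-visible pointer (cxlr->cxlr_pmem in the patch) referring to memory the cleanup handler is about to free.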