@@ -1612,21 +1612,23 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 	dev->host_mem_descs = NULL;
 }
 
-static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
+static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+		u32 chunk_size)
 {
 	struct nvme_host_mem_buf_desc *descs;
-	u32 chunk_size, max_entries, len;
+	u32 max_entries, len;
 	dma_addr_t descs_dma;
 	int i = 0;
 	void **bufs;
 	u64 size = 0, tmp;
 
-	/* start big and work our way down */
-	chunk_size = min(preferred, (u64)PAGE_SIZE << MAX_ORDER);
-retry:
 	tmp = (preferred + chunk_size - 1);
 	do_div(tmp, chunk_size);
 	max_entries = tmp;
+
+	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
+		max_entries = dev->ctrl.hmmaxd;
+
 	descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
 			&descs_dma, GFP_KERNEL);
 	if (!descs)
@@ -1650,15 +1652,9 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 		i++;
 	}
 
-	if (!size || (min && size < min)) {
-		dev_warn(dev->ctrl.device,
-			"failed to allocate host memory buffer.\n");
+	if (!size)
 		goto out_free_bufs;
-	}
 
-	dev_info(dev->ctrl.device,
-		"allocated %lld MiB host memory buffer.\n",
-		size >> ilog2(SZ_1M));
 	dev->nr_host_mem_descs = i;
 	dev->host_mem_size = size;
 	dev->host_mem_descs = descs;
@@ -1679,29 +1675,43 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
 			descs_dma);
 out:
-	/* try a smaller chunk size if we failed early */
-	if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
-		chunk_size /= 2;
-		goto retry;
-	}
 	dev->host_mem_descs = NULL;
 	return -ENOMEM;
 }
 
-static void nvme_setup_host_mem(struct nvme_dev *dev)
+static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
+{
+	u32 chunk_size;
+
+	/* start big and work our way down */
+	for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
+	     chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
+	     chunk_size /= 2) {
+		if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
+			if (!min || dev->host_mem_size >= min)
+				return 0;
+			nvme_free_host_mem(dev);
+		}
+	}
+
+	return -ENOMEM;
+}
+
+static int nvme_setup_host_mem(struct nvme_dev *dev)
 {
 	u64 max = (u64)max_host_mem_size_mb * SZ_1M;
 	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
 	u64 min = (u64)dev->ctrl.hmmin * 4096;
 	u32 enable_bits = NVME_HOST_MEM_ENABLE;
+	int ret = 0;
 
 	preferred = min(preferred, max);
 	if (min > max) {
 		dev_warn(dev->ctrl.device,
 			"min host memory (%lld MiB) above limit (%d MiB).\n",
 			min >> ilog2(SZ_1M), max_host_mem_size_mb);
 		nvme_free_host_mem(dev);
-		return;
+		return 0;
 	}
 
 	/*
@@ -1715,12 +1725,21 @@ static void nvme_setup_host_mem(struct nvme_dev *dev)
 	}
 
 	if (!dev->host_mem_descs) {
-		if (nvme_alloc_host_mem(dev, min, preferred))
-			return;
+		if (nvme_alloc_host_mem(dev, min, preferred)) {
+			dev_warn(dev->ctrl.device,
+				"failed to allocate host memory buffer.\n");
+			return 0; /* controller must work without HMB */
+		}
+
+		dev_info(dev->ctrl.device,
+			"allocated %lld MiB host memory buffer.\n",
+			dev->host_mem_size >> ilog2(SZ_1M));
 	}
 
-	if (nvme_set_host_mem(dev, enable_bits))
+	ret = nvme_set_host_mem(dev, enable_bits);
+	if (ret)
 		nvme_free_host_mem(dev);
+	return ret;
 }
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
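
The hunks above split the old retry loop into __nvme_alloc_host_mem(), which allocates the buffer at one fixed chunk size, and a new nvme_alloc_host_mem() wrapper that halves the chunk size on each pass until the allocation covers the controller's minimum or the chunk size drops below max(hmminds * 4096, two pages). The standalone user-space sketch below only illustrates that descent; try_alloc_host_mem(), PAGE_SIZE_SIM and the sample sizes are hypothetical stand-ins, not driver code.

/*
 * Sketch of the chunk-size descent: try the preferred size with large
 * chunks first, halve the chunk size when that fails, and stop once the
 * result would fall below the minimum. All names and values are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SIM 4096ULL

/* Pretend allocator: higher-order chunks above 1 MiB always fail. */
static uint64_t try_alloc_host_mem(uint64_t preferred, uint64_t chunk_size)
{
	if (chunk_size > (1ULL << 20))
		return 0;
	return preferred - preferred % chunk_size;
}

static int alloc_host_mem(uint64_t min, uint64_t preferred, uint64_t max_chunk)
{
	uint64_t chunk_size, got;

	/* start big and work our way down, like the patched driver */
	for (chunk_size = preferred < max_chunk ? preferred : max_chunk;
	     chunk_size >= PAGE_SIZE_SIM * 2; chunk_size /= 2) {
		got = try_alloc_host_mem(preferred, chunk_size);
		if (got && (!min || got >= min)) {
			printf("allocated %llu bytes with %llu-byte chunks\n",
			       (unsigned long long)got,
			       (unsigned long long)chunk_size);
			return 0;
		}
	}
	return -1;	/* nothing acceptable; caller leaves the HMB disabled */
}

int main(void)
{
	/* e.g. hmmin = 512 and hmpre = 8192, both in 4 KiB units */
	return alloc_host_mem(512 * 4096ULL, 8192 * 4096ULL, 4ULL << 20) ? 1 : 0;
}
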
@@ -2164,8 +2183,11 @@ static void nvme_reset_work(struct work_struct *work)
 			"unable to allocate dma for dbbuf\n");
 	}
 
-	if (dev->ctrl.hmpre)
-		nvme_setup_host_mem(dev);
+	if (dev->ctrl.hmpre) {
+		result = nvme_setup_host_mem(dev);
+		if (result < 0)
+			goto out;
+	}
 
 	result = nvme_setup_io_queues(dev);
 	if (result)
@@ -2497,6 +2519,10 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+	{ PCI_DEVICE(0x1d1d, 0x1f1f),	/* LighNVM qemu device */
+		.driver_data = NVME_QUIRK_LIGHTNVM, },
+	{ PCI_DEVICE(0x1d1d, 0x2807),	/* CNEX WL */
+		.driver_data = NVME_QUIRK_LIGHTNVM, },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
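
For context on the sizing arithmetic in nvme_setup_host_mem() above: HMPRE and HMMIN are reported by the controller in 4 KiB units (hence the * 4096), and the preferred size is clamped to the max_host_mem_size_mb module parameter. A minimal user-space sketch of that calculation follows; the concrete values are made up for illustration and only the arithmetic mirrors the driver.

/*
 * Sketch of the HMB sizing arithmetic. The limit of 128 MiB and the
 * controller-reported HMPRE/HMMIN values below are assumed examples.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_host_mem_size_mb = 128;		/* assumed module parameter */
	uint32_t hmpre = 8192, hmmin = 512;		/* controller values, 4 KiB units */

	uint64_t max = max_host_mem_size_mb << 20;	/* 128 MiB */
	uint64_t preferred = (uint64_t)hmpre * 4096;	/* 32 MiB */
	uint64_t min = (uint64_t)hmmin * 4096;		/* 2 MiB */

	if (preferred > max)
		preferred = max;
	if (min > max)
		printf("min above limit: HMB must stay disabled\n");
	else
		printf("HMB target: preferred %llu MiB, minimum %llu MiB\n",
		       (unsigned long long)(preferred >> 20),
		       (unsigned long long)(min >> 20));
	return 0;
}
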