@@ -511,7 +511,11 @@ static inline void nv_vfree(void *ptr, NvU64 size)
 
 static inline void *nv_ioremap(NvU64 phys, NvU64 size)
 {
+#if IS_ENABLED(CONFIG_INTEL_TDX_GUEST) && defined(NV_IOREMAP_DRIVER_HARDENED_PRESENT)
+    void *ptr = ioremap_driver_hardened(phys, size);
+#else
     void *ptr = ioremap(phys, size);
+#endif
     if (ptr)
         NV_MEMDBG_ADD(ptr, size);
     return ptr;
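Note: the NV_*_PRESENT guards above come from the driver's conftest, which compile-tests the target kernel for each symbol at build time. A minimal sketch of the kind of probe that could emit NV_IOREMAP_DRIVER_HARDENED_PRESENT follows; the probe name and exact form are assumptions for illustration, not taken from this commit.

#include <linux/io.h>

/* Hypothetical conftest probe: if this compiles against the target
 * kernel headers, ioremap_driver_hardened() exists (TDX-enabled
 * kernels) and the _PRESENT macro would be defined for the build. */
void conftest_ioremap_driver_hardened(void)
{
    (void)ioremap_driver_hardened(0, 0);
}
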
@@ -524,11 +528,11 @@ static inline void *nv_ioremap_nocache(NvU64 phys, NvU64 size)
 
 static inline void *nv_ioremap_cache(NvU64 phys, NvU64 size)
 {
-#if defined(NV_IOREMAP_CACHE_PRESENT)
-    void *ptr = ioremap_cache(phys, size);
-    if (ptr)
-        NV_MEMDBG_ADD(ptr, size);
-    return ptr;
+    void *ptr = NULL;
+#if IS_ENABLED(CONFIG_INTEL_TDX_GUEST) && defined(NV_IOREMAP_CACHE_SHARED_PRESENT)
+    ptr = ioremap_cache_shared(phys, size);
+#elif defined(NV_IOREMAP_CACHE_PRESENT)
+    ptr = ioremap_cache(phys, size);
 #elif defined(NVCPU_PPC64LE)
     //
     // ioremap_cache() has been only implemented correctly for ppc64le with
@@ -543,25 +547,32 @@ static inline void *nv_ioremap_cache(NvU64 phys, NvU64 size)
     // (commit 40f1ce7fb7e8, kernel 3.0+) and that covers all kernels we
     // support on power.
     //
-    void *ptr = ioremap_prot(phys, size, pgprot_val(PAGE_KERNEL));
-    if (ptr)
-        NV_MEMDBG_ADD(ptr, size);
-    return ptr;
+    ptr = ioremap_prot(phys, size, pgprot_val(PAGE_KERNEL));
 #else
     return nv_ioremap(phys, size);
 #endif
-}
 
-static inline void *nv_ioremap_wc(NvU64 phys, NvU64 size)
-{
-#if defined(NV_IOREMAP_WC_PRESENT)
-    void *ptr = ioremap_wc(phys, size);
     if (ptr)
         NV_MEMDBG_ADD(ptr, size);
+
     return ptr;
+}
+
+static inline void *nv_ioremap_wc(NvU64 phys, NvU64 size)
+{
+    void *ptr = NULL;
+#if IS_ENABLED(CONFIG_INTEL_TDX_GUEST) && defined(NV_IOREMAP_DRIVER_HARDENED_WC_PRESENT)
+    ptr = ioremap_driver_hardened_wc(phys, size);
+#elif defined(NV_IOREMAP_WC_PRESENT)
+    ptr = ioremap_wc(phys, size);
 #else
     return nv_ioremap_nocache(phys, size);
 #endif
+
+    if (ptr)
+        NV_MEMDBG_ADD(ptr, size);
+
+    return ptr;
 }
 
 static inline void nv_iounmap(void *ptr, NvU64 size)
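Note: the restructuring gives nv_ioremap_cache() and nv_ioremap_wc() a single `ptr` with a shared NV_MEMDBG_ADD() tail, and prefers the TDX-hardened ioremap_cache_shared()/ioremap_driver_hardened_wc() variants when the kernel provides them. Conceptually, a "shared" mapping in a confidential guest is an ioremap with the page-encryption bit cleared so the VMM can observe the accesses. A rough sketch in terms of mainline APIs; this helper is illustrative, not part of the patch, and ioremap_prot() is only available under CONFIG_HAVE_IOREMAP_PROT (PAGE_KERNEL_IO is x86-specific):

#include <linux/io.h>
#include <linux/pgtable.h>

/* Illustrative only: map MMIO with the encryption bit cleared, which is
 * roughly what the hardened/shared ioremap variants arrange internally. */
static void __iomem *example_ioremap_shared(resource_size_t phys, unsigned long size)
{
    pgprot_t prot = pgprot_decrypted(PAGE_KERNEL_IO);
    return ioremap_prot(phys, size, pgprot_val(prot));
}
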
@@ -634,37 +645,24 @@ static NvBool nv_numa_node_has_memory(int node_id)
         free_pages(ptr, order); \
     }
 
-extern NvU64 nv_shared_gpa_boundary;
+static inline pgprot_t nv_sme_clr(pgprot_t prot)
+{
+#if defined(__sme_clr)
+    return __pgprot(__sme_clr(pgprot_val(prot)));
+#else
+    return prot;
+#endif // __sme_clr
+}
 
 static inline pgprot_t nv_adjust_pgprot(pgprot_t vm_prot, NvU32 extra)
 {
     pgprot_t prot = __pgprot(pgprot_val(vm_prot) | extra);
-#if defined(CONFIG_AMD_MEM_ENCRYPT) && defined(NV_PGPROT_DECRYPTED_PRESENT)
-    /*
-     * When AMD memory encryption is enabled, device memory mappings with the
-     * C-bit set read as 0xFF, so ensure the bit is cleared for user mappings.
-     *
-     * If cc_mkdec() is present, then pgprot_decrypted() can't be used.
-     */
-#if defined(NV_CC_MKDEC_PRESENT)
-    if (nv_shared_gpa_boundary != 0)
-    {
-        /*
-         * By design, a VM using vTOM doesn't see the SEV setting and
-         * for AMD with vTOM, *set* means decrypted.
-         */
-        prot = __pgprot(nv_shared_gpa_boundary | (pgprot_val(vm_prot)));
-    }
-    else
-    {
-        prot = __pgprot(__sme_clr(pgprot_val(vm_prot)));
-    }
-#else
-    prot = pgprot_decrypted(prot);
-#endif
-#endif
 
-    return prot;
+#if defined(pgprot_decrypted)
+    return pgprot_decrypted(prot);
+#else
+    return nv_sme_clr(prot);
+#endif // pgprot_decrypted
 }
 
 #if defined(PAGE_KERNEL_NOENC)
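Note: nv_adjust_pgprot() now applies the decrypted attribute unconditionally: pgprot_decrypted() when the kernel defines it (a no-op without memory encryption), otherwise the new nv_sme_clr() fallback, which itself degrades to the identity when __sme_clr is absent; the vTOM/nv_shared_gpa_boundary special case is removed. A hedged sketch of where such an adjustment typically lands, an mmap() handler; the handler name and PFN computation are illustrative, not from this file:

#include <linux/mm.h>
#include <linux/fs.h>

/* Illustrative mmap handler: adjust the protection so the user mapping
 * of device memory is decrypted, then establish the PFN mapping. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
    unsigned long pfn = vma->vm_pgoff; /* placeholder PFN */

    vma->vm_page_prot = nv_adjust_pgprot(vma->vm_page_prot, 0);
    return io_remap_pfn_range(vma, vma->vm_start, pfn,
                              vma->vm_end - vma->vm_start,
                              vma->vm_page_prot);
}
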
@@ -1324,7 +1322,7 @@ nv_dma_maps_swiotlb(struct device *dev)
      * SEV memory encryption") forces SWIOTLB to be enabled when AMD SEV
      * is active in all cases.
      */
-    if (os_sev_enabled)
+    if (os_cc_enabled)
         swiotlb_in_use = NV_TRUE;
 #endif
 
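Note: renaming os_sev_enabled to os_cc_enabled widens the SWIOTLB check from AMD SEV specifically to any confidential-computing guest (SEV or TDX), both of which bounce device DMA through SWIOTLB. One plausible way such a flag could be derived, using the kernel's generic CC API (kernel 5.15+); the initialization shown is an assumption, not taken from this commit:

#include <linux/cc_platform.h>

/* Assumed sketch: CC_ATTR_GUEST_MEM_ENCRYPT is true for both SEV and
 * TDX guests, exactly the cases where DMA must bounce through SWIOTLB. */
static bool example_detect_cc(void)
{
    return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
}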