@@ -454,7 +454,7 @@ static int __init intel_iommu_setup(char *str)
 		iommu_dma_forcedac = true;
 	} else if (!strncmp(str, "strict", 6)) {
 		pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n");
-		iommu_set_dma_strict(true);
+		iommu_set_dma_strict();
 	} else if (!strncmp(str, "sp_off", 6)) {
 		pr_info("Disable supported super page\n");
 		intel_iommu_superpage = 0;
@@ -4394,7 +4394,7 @@ int __init intel_iommu_init(void)
 		 */
 		if (cap_caching_mode(iommu->cap)) {
 			pr_info_once("IOMMU batching disallowed due to virtualization\n");
-			iommu_set_dma_strict(true);
+			iommu_set_dma_strict();
 		}
 		iommu_device_sysfs_add(&iommu->iommu, NULL,
 				       intel_iommu_groups,
@@ -5712,7 +5712,7 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
 	} else if (dmar_map_gfx) {
 		/* we have to ensure the gfx device is idle before we flush */
 		pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
-		iommu_set_dma_strict(true);
+		iommu_set_dma_strict();
 	}
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
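
For reference, the calls above assume iommu_set_dma_strict() now takes no argument and can only switch strict DMA invalidation on. A minimal sketch of such a one-way setter is shown below; the flag name and the body are illustrative assumptions, and the real helper in drivers/iommu/iommu.c may track additional command-line state.

/*
 * Minimal sketch of a parameterless strict-mode setter, assuming a single
 * global flag; the actual core-IOMMU helper may also record whether the
 * value was forced from the kernel command line.
 */
static bool iommu_dma_strict;

void iommu_set_dma_strict(void)
{
	/* One-way switch: callers can only tighten the policy, never relax it. */
	iommu_dma_strict = true;
}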