diff --git a/config/sources/families/include/uefi_common.inc b/config/sources/families/include/uefi_common.inc index 2d7cfb911b7c..e634fa3d8ff9 100644 --- a/config/sources/families/include/uefi_common.inc +++ b/config/sources/families/include/uefi_common.inc @@ -22,7 +22,7 @@ case "${BRANCH}" in ;; cloud) declare -g GRUB_CMDLINE_LINUX_DEFAULT="selinux=0 loglevel=3 max_loop=32" - declare -g KERNEL_MAJOR_MINOR="6.12" + declare -g KERNEL_MAJOR_MINOR="6.18" declare -g LINUXCONFIG="linux-uefi-${LINUXFAMILY}-cloud" declare -g INSTALL_ARMBIAN_FIRMWARE="no" declare -g EXTRAWIFI="no" @@ -34,15 +34,15 @@ case "${BRANCH}" in ;; legacy) - declare -g KERNEL_MAJOR_MINOR="6.6" + declare -g KERNEL_MAJOR_MINOR="6.12" ;; current) - declare -g KERNEL_MAJOR_MINOR="6.12" + declare -g KERNEL_MAJOR_MINOR="6.18" ;; edge) - declare -g KERNEL_MAJOR_MINOR="6.18" + declare -g KERNEL_MAJOR_MINOR="6.19" ;; esac diff --git a/config/sources/families/uefi-loong64.conf b/config/sources/families/uefi-loong64.conf index fecee3d09601..e6a75a0de8b9 100644 --- a/config/sources/families/uefi-loong64.conf +++ b/config/sources/families/uefi-loong64.conf @@ -14,3 +14,11 @@ declare -g ARCH="loong64" # shellcheck source=config/sources/families/include/uefi_common.inc source "${BASH_SOURCE%/*}/include/uefi_common.inc" enable_extension "grub" + +# @TODO: TEMP: hold loong64 edge kernel at 6.18 while amazingfate reworks 6.19 +case "${BRANCH}" in + edge) + declare -g KERNEL_MAJOR_MINOR="6.18" + declare -g KERNELPATCHDIR="archive/uefi-${LINUXFAMILY}-${KERNEL_MAJOR_MINOR}" # override uefi_common + ;; +esac diff --git a/patch/kernel/archive/uefi-arm64-6.19/1000-net-stmicro-stmmac-Phytium-onboard-ethernet-drivers-and-ACPI-glue-for-6.x.patch b/patch/kernel/archive/uefi-arm64-6.19/1000-net-stmicro-stmmac-Phytium-onboard-ethernet-drivers-and-ACPI-glue-for-6.x.patch new file mode 100644 index 000000000000..260a132717d0 --- /dev/null +++ 
b/patch/kernel/archive/uefi-arm64-6.19/1000-net-stmicro-stmmac-Phytium-onboard-ethernet-drivers-and-ACPI-glue-for-6.x.patch @@ -0,0 +1,660 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Ricardo Pardini +Date: Thu, 4 Aug 2022 21:49:10 +0200 +Subject: net: stmicro: stmmac: Phytium onboard ethernet drivers and ACPI glue + for 6.x + +My board has two eths; only eth1 has an actual PHY and works. +Source is https://gitee.com/atzlinux/atzlinux-kernel/tree/master/debian/patch +rpardini hammered: +- small api change in upstream around 5.19 +- hand-merged for 6.5 + - use `.remove_new` due to "net: stmmac: Make stmmac_pltfr_remove() return void" +- hand-merged for 6.6: + - OXNAS is no more, Makefile lost reference, fix manually +- Remove Kconfig deps from Feiteng stuff not in mainline +- Default Kconfig to module +- fix stmmac acpi glue for Feiteng on 6.6.y +- drop the (now-qcom) phy hibernate stuff as it landed by 6.12.y +- rework stmmac_probe_config_acpi addition for 6.12.y +- rework for 6.13; remove_new is just remove again + +Signed-off-by: Ricardo Pardini +--- + drivers/net/ethernet/stmicro/stmmac/Kconfig | 10 + + drivers/net/ethernet/stmicro/stmmac/Makefile | 1 + + drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c | 19 + + drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c | 223 ++++++++ + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1 + + drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 255 +++++++++- + drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h | 2 + + 7 files changed, 510 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig +index 111111111111..222222222222 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig ++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig +@@ -132,6 +132,16 @@ config DWMAC_MESON + the stmmac device driver. This driver is used for Meson6, + Meson8, Meson8b and GXBB SoCs. 
+ ++config DWMAC_PHYTIUM ++ tristate "Phytium DWMAC support" ++ default m ++ depends on ACPI ++ help ++ Support for Ethernet controllers on Phytium SoCs. ++ ++ This selects the Phytium DWMAC glue layer support for the stmmac ++ device driver. ++ + config DWMAC_QCOM_ETHQOS + tristate "Qualcomm ETHQOS support" + default ARCH_QCOM +diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile +index 111111111111..222222222222 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/Makefile ++++ b/drivers/net/ethernet/stmicro/stmmac/Makefile +@@ -20,6 +20,7 @@ obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o + obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o + obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o + obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o ++obj-$(CONFIG_DWMAC_PHYTIUM) += dwmac-phytium.o + obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o + obj-$(CONFIG_DWMAC_RENESAS_GBETH) += dwmac-renesas-gbeth.o + obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +index 111111111111..222222222222 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +@@ -9,6 +9,7 @@ + * warranty of any kind, whether express or implied. 
+ */ + ++#include + #include + #include + #include +@@ -32,6 +33,12 @@ static int dwmac_generic_probe(struct platform_device *pdev) + dev_err(&pdev->dev, "dt configuration failed\n"); + return PTR_ERR(plat_dat); + } ++ } else if (has_acpi_companion(&pdev->dev)) { ++ plat_dat = stmmac_probe_config_acpi(pdev, stmmac_res.mac); ++ if (!plat_dat) { ++ dev_err(&pdev->dev, "acpi configuration failed\n"); ++ return -EINVAL; ++ } + } else { + plat_dat = dev_get_platdata(&pdev->dev); + if (!plat_dat) { +@@ -66,12 +73,24 @@ static const struct of_device_id dwmac_generic_match[] = { + }; + MODULE_DEVICE_TABLE(of, dwmac_generic_match); + ++#ifdef CONFIG_ACPI ++static const struct acpi_device_id dwmac_acpi_ids[] = { ++ { .id = "PHYT0004" }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(acpi, dwmac_acpi_ids); ++#else ++#define dwmac_acpi_ids NULL ++#endif ++ + static struct platform_driver dwmac_generic_driver = { + .probe = dwmac_generic_probe, + .driver = { + .name = STMMAC_RESOURCE_NAME, + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = dwmac_generic_match, ++ .acpi_match_table = ACPI_PTR(dwmac_acpi_ids), + }, + }; + module_platform_driver(dwmac_generic_driver); +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c +@@ -0,0 +1,223 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Phytium DWMAC platform glue driver ++ * ++ * Copyright (C) 2022 Icenowy Zheng ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "stmmac.h" ++#include "stmmac_platform.h" ++ ++/** ++ * Acquire Phytium DWMAC resources from ACPI ++ */ ++int dwmac_phytium_get_resources(struct platform_device *pdev, ++ struct stmmac_resources *stmmac_res) ++{ ++ memset(stmmac_res, 0, sizeof(*stmmac_res)); ++ ++ stmmac_res->irq = platform_get_irq(pdev, 0); ++ if (stmmac_res->irq < 0) ++ return 
stmmac_res->irq; ++ ++ stmmac_res->addr = devm_platform_ioremap_resource(pdev, 0); ++ stmmac_res->wol_irq = stmmac_res->irq; ++ stmmac_res->lpi_irq = -ENOENT; ++ ++ return PTR_ERR_OR_ZERO(stmmac_res->addr); ++} ++ ++/** ++ * Parse Phytium ACPI properties ++ */ ++static struct plat_stmmacenet_data * ++dwmac_phytium_parse_config_acpi(struct platform_device *pdev, const char *mac) ++{ ++ struct device *dev = &pdev->dev; ++ struct fwnode_handle *np; ++ struct plat_stmmacenet_data *plat; ++ struct stmmac_dma_cfg *dma_cfg; ++ struct stmmac_axi *axi; ++ struct clk_hw *clk_hw; ++ u64 clk_freq; ++ int ret; ++ ++ plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL); ++ if (!plat) ++ return ERR_PTR(-ENOMEM); ++ ++ np = dev_fwnode(dev); ++ ++ plat->phy_interface = fwnode_get_phy_mode(np); ++ ++ ++ /* Get max speed of operation from properties */ ++ if (fwnode_property_read_u32(np, "max-speed", &plat->max_speed)) ++ plat->max_speed = 1000; ++ ++ if (fwnode_property_read_u32(np, "bus_id", &plat->bus_id)) ++ plat->bus_id = 2; ++ ++ /* Default to PHY auto-detection */ ++ plat->phy_addr = -1; ++ ++ plat->mdio_bus_data = devm_kzalloc(dev, ++ sizeof(struct stmmac_mdio_bus_data), ++ GFP_KERNEL); ++ ++ fwnode_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); ++ fwnode_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size); ++ if (plat->tx_fifo_size == 0) ++ plat->tx_fifo_size = 0x10000; ++ if (plat->rx_fifo_size == 0) ++ plat->rx_fifo_size = 0x10000; ++ ++ plat->force_sf_dma_mode = ++ fwnode_property_read_bool(np, "snps,force_sf_dma_mode"); ++ ++ if (fwnode_property_read_bool(np, "snps,en-tx-lpi-clockgating")) ++ plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING; ++ ++ /* Set the maxmtu to a default of JUMBO_LEN in case the ++ * parameter is not present. 
++ */ ++ plat->maxmtu = JUMBO_LEN; ++ ++ /* Set default value for multicast hash bins */ ++ plat->multicast_filter_bins = HASH_TABLE_SIZE; ++ ++ /* Set default value for unicast filter entries */ ++ plat->unicast_filter_entries = 1; ++ ++ fwnode_property_read_u32(np, "max-frame-size", &plat->maxmtu); ++ plat->has_gmac = 1; ++ plat->pmt = 1; ++ ++ dma_cfg = devm_kzalloc(dev, sizeof(*dma_cfg), GFP_KERNEL); ++ if (!dma_cfg) ++ return ERR_PTR(-ENOMEM); ++ plat->dma_cfg = dma_cfg; ++ ++ fwnode_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); ++ if (!dma_cfg->pbl) ++ dma_cfg->pbl = DEFAULT_DMA_PBL; ++ ++ fwnode_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl); ++ fwnode_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl); ++ dma_cfg->pblx8 = !fwnode_property_read_bool(np, "snps,no-pbl-x8"); ++ ++ dma_cfg->aal = fwnode_property_read_bool(np, "snps,aal"); ++ dma_cfg->fixed_burst = fwnode_property_read_bool(np, "snps,fixed-burst"); ++ dma_cfg->mixed_burst = fwnode_property_read_bool(np, "snps,mixed-burst"); ++ ++ plat->force_thresh_dma_mode = fwnode_property_read_bool(np, "snps,force_thresh_dma_mode"); ++ if (plat->force_thresh_dma_mode) ++ plat->force_sf_dma_mode = 0; ++ ++ fwnode_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed); ++ ++ axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL); ++ if (!axi) ++ return ERR_PTR(-ENOMEM); ++ plat->axi = axi; ++ ++ axi->axi_wr_osr_lmt = 1; ++ axi->axi_rd_osr_lmt = 1; ++ ++ plat->rx_queues_to_use = 1; ++ plat->tx_queues_to_use = 1; ++ ++ /** ++ * First Queue must always be in DCB mode. As MTL_QUEUE_DCB=1 we need ++ * to always set this, otherwise Queue will be classified as AVB ++ * (because MTL_QUEUE_AVB = 0). 
++ */ ++ plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; ++ plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; ++ ++ plat->rx_queues_cfg[0].use_prio = true; ++ ++ plat->rx_queues_cfg[0].pkt_route = 0x0; ++ ++ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; ++ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP; ++ ++ ret = fwnode_property_read_u64(np, "clock-frequency", &clk_freq); ++ if (ret < 0) ++ clk_freq = 125000000; /* default to 125MHz */ ++ ++ clk_hw = clk_hw_register_fixed_rate(dev, dev_name(dev), NULL, ++ 0, clk_freq); ++ if (IS_ERR(clk_hw)) ++ return ERR_PTR(PTR_ERR(clk_hw)); ++ ret = devm_clk_hw_register_clkdev(dev, clk_hw, dev_name(dev), ++ dev_name(dev)); ++ if (ret) ++ return ERR_PTR(ret); ++ plat->stmmac_clk = clk_hw->clk; ++ clk_prepare_enable(plat->stmmac_clk); ++ ++ return plat; ++} ++ ++static int dwmac_phytium_probe(struct platform_device *pdev) ++{ ++ struct plat_stmmacenet_data *plat_dat; ++ struct stmmac_resources stmmac_res; ++ int ret; ++ ++ ret = dwmac_phytium_get_resources(pdev, &stmmac_res); ++ if (ret) ++ return ret; ++ ++ if (has_acpi_companion(&pdev->dev)) { ++ plat_dat = dwmac_phytium_parse_config_acpi(pdev, stmmac_res.mac); ++ if (IS_ERR(plat_dat)) { ++ dev_err(&pdev->dev, "ACPI configuration failed\n"); ++ return PTR_ERR(plat_dat); ++ } ++ } else { ++ dev_err(&pdev->dev, "no ACPI properties\n"); ++ return -EINVAL; ++ } ++ ++ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); ++ if (ret) ++ goto err_exit; ++ ++ return 0; ++ ++err_exit: ++ if (plat_dat->exit) ++ plat_dat->exit(pdev, plat_dat->bsp_priv); ++ ++ return ret; ++} ++ ++static const struct acpi_device_id dwmac_phytium_acpi_match[] = { ++ { ++ .id = "PHYT0004", ++ }, ++ {} ++}; ++MODULE_DEVICE_TABLE(acpi, dwmac_phytium_acpi_match); ++ ++static struct platform_driver dwmac_phytium_driver = { ++ .probe = dwmac_phytium_probe, ++ .remove = stmmac_pltfr_remove, ++ .driver = { ++ .name = "dwmac-phytium", ++ .pm = &stmmac_pltfr_pm_ops, ++ .acpi_match_table = 
ACPI_PTR(dwmac_phytium_acpi_match), ++ }, ++}; ++module_platform_driver(dwmac_phytium_driver); ++ ++MODULE_DESCRIPTION("Glue driver for Phytium DWMAC"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 111111111111..222222222222 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -14,6 +14,7 @@ + https://bugzilla.stlinux.com/ + *******************************************************************************/ + ++#include + #include + #include + #include +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +index 111111111111..222222222222 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +@@ -9,6 +9,9 @@ + *******************************************************************************/ + + #include ++#include ++#include ++#include + #include + #include + #include +@@ -697,6 +700,249 @@ struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat, + } + EXPORT_SYMBOL_GPL(stmmac_pltfr_find_clk); + ++#ifdef CONFIG_ACPI ++/* ++ * Parse ACPI _DSD to setup AXI register ++ */ ++static struct stmmac_axi * stmmac_axi_setup_acpi(struct platform_device *pdev) ++{ ++ struct fwnode_handle *np = dev_fwnode(&(pdev->dev)); ++ struct stmmac_axi * axi; ++ ++ axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL); ++ if (!axi) ++ return ERR_PTR(-ENOMEM); ++ ++ axi->axi_lpi_en = fwnode_property_read_bool(np, "snps,lpi_en"); ++ axi->axi_xit_frm = fwnode_property_read_bool(np, "snps,xit_frm"); ++ axi->axi_kbbe = fwnode_property_read_bool(np, "snps,axi_kbbe"); ++ axi->axi_fb = fwnode_property_read_bool(np, "snps,axi_fb"); ++ axi->axi_mb = fwnode_property_read_bool(np, "snps,axi_mb"); ++ axi->axi_rb = fwnode_property_read_bool(np, "snps,axi_rb"); ++ ++ if 
(fwnode_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt)) ++ axi->axi_wr_osr_lmt = 1; ++ if (fwnode_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt)) ++ axi->axi_rd_osr_lmt = 1; ++ fwnode_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN); ++ ++ return axi; ++} ++ ++/** ++ * Parse ACPI _DSD parameters for multiple queues configuration ++ */ ++static void stmmac_mtl_setup_acpi(struct platform_device *pdev, ++ struct plat_stmmacenet_data *plat) ++{ ++ plat->rx_queues_to_use = 1; ++ plat->tx_queues_to_use = 1; ++ ++ /** ++ * First Queue must always be in DCB mode. As MTL_QUEUE_DCB=1 we need ++ * to always set this, otherwise Queue will be classified as AVB ++ * (because MTL_QUEUE_AVB = 0). ++ */ ++ plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; ++ plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; ++ ++ plat->rx_queues_cfg[0].use_prio = true; ++ ++ plat->rx_queues_cfg[0].pkt_route = 0x0; ++ ++ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; ++ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP; ++ ++ plat->tx_queues_cfg[0].use_prio = true; ++} ++ ++static int stmmac_acpi_phy(struct plat_stmmacenet_data *plat, ++ struct fwnode_handle *np, struct device *dev) ++{ ++ plat->mdio_bus_data = devm_kzalloc(dev, ++ sizeof(struct stmmac_mdio_bus_data), ++ GFP_KERNEL); ++ ++ return 0; ++} ++ ++int fw_get_phy_mode(struct fwnode_handle *np) ++{ ++ const char *pm; ++ int err, i; ++ ++ err = fwnode_property_read_string(np, "phy-mode", &pm); ++ if (err < 0) ++ err = fwnode_property_read_string(np, "phy-connection-mode", &pm); ++ if (err < 0) ++ return err; ++ ++ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) { ++ if (!strcasecmp(pm, phy_modes(i))) ++ return i; ++ } ++ ++ return -ENODEV; ++} ++ ++int stmmac_acpi_clock_setup(struct plat_stmmacenet_data *plat, ++ struct platform_device *pdev) ++{ ++ struct fwnode_handle *np = dev_fwnode(&(pdev->dev)); ++ struct device * dev = &pdev->dev; ++ struct clk *clk = ERR_PTR(-ENODEV); ++ u64 clk_freq = 
0; ++ int err; ++ ++ err = fwnode_property_read_u64(np, "clock-frequency", &clk_freq); ++ if (err < 0) ++ clk_freq = 125000000; /* default to 125MHz */ ++ ++ plat->stmmac_clk = devm_clk_get(dev, dev_name(dev)); ++ if (IS_ERR(plat->stmmac_clk)) { ++ clk = clk_register_fixed_rate(dev, dev_name(dev), NULL, 0, clk_freq); ++ if (IS_ERR(clk)) ++ return -1; ++ if (clk_register_clkdev(clk, dev_name(dev), dev_name(dev))) ++ return -1; ++ plat->stmmac_clk = clk; ++ } ++ clk_prepare_enable(plat->stmmac_clk); ++ ++ plat->pclk = devm_clk_get(dev, "pclk"); ++ if (IS_ERR(plat->pclk)) ++ plat->pclk = NULL; ++ clk_prepare_enable(plat->pclk); ++ ++ plat->clk_ptp_ref = devm_clk_get(dev, "ptp_ref"); ++ if (IS_ERR(plat->clk_ptp_ref)) { ++ plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk); ++ plat->clk_ptp_ref = NULL; ++ } ++ ++ plat->stmmac_rst = devm_reset_control_get(dev,STMMAC_RESOURCE_NAME); ++ if (IS_ERR(plat->stmmac_rst)) { ++ dev_info(dev, "no reset control found\n"); ++ plat->stmmac_rst = NULL; ++ } ++ ++ return 0; ++} ++ ++/** ++ * Parse ACPI driver parameters ++ */ ++struct plat_stmmacenet_data * ++stmmac_probe_config_acpi(struct platform_device *pdev, u8 *mac) ++{ ++ struct fwnode_handle *np; ++ struct plat_stmmacenet_data *plat; ++ struct stmmac_dma_cfg *dma_cfg; ++ ++ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); ++ if (!plat) ++ return ERR_PTR(-ENOMEM); ++ ++ np = dev_fwnode(&(pdev->dev)); ++ ++ plat->phy_interface = fw_get_phy_mode(np); ++ ++ /* Get max speed of operation from device tree */ ++ if (fwnode_property_read_u32(np, "max-speed", &plat->max_speed)) ++ plat->max_speed = -1; ++ ++ if (fwnode_property_read_u32(np, "bus_id", &plat->bus_id)) ++ plat->bus_id = 2; ++ ++ /* Default to PHY auto-detection */ ++ plat->phy_addr = -1; ++ ++ /* "snps,phy-addr" is not a standard property. Mark it as deprecated ++ * and warn of its use. Remove this when PHY node support is added. 
++ */ ++ if (fwnode_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) ++ dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); ++ ++ if (stmmac_acpi_phy(plat, np, &pdev->dev)) ++ return ERR_PTR(-ENODEV); ++ ++ fwnode_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); ++ fwnode_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size); ++ if (plat->tx_fifo_size == 0) ++ plat->tx_fifo_size = 0x10000; ++ if (plat->rx_fifo_size == 0) ++ plat->rx_fifo_size = 0x10000; ++ ++ plat->force_sf_dma_mode = ++ fwnode_property_read_bool(np, "snps,force_sf_dma_mode"); ++ ++ if (fwnode_property_read_bool(np, "snps,en-tx-lpi-clockgating")) ++ plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING; ++ ++ /* Set the maxmtu to a default of JUMBO_LEN in case the ++ * parameter is not present. ++ */ ++ plat->maxmtu = JUMBO_LEN; ++ ++ /* Set default value for multicast hash bins */ ++ plat->multicast_filter_bins = HASH_TABLE_SIZE; ++ ++ /* Set default value for unicast filter entries */ ++ plat->unicast_filter_entries = 1; ++ ++ /* Only to "snps,dwmac" */ ++ fwnode_property_read_u32(np, "max-frame-size", &plat->maxmtu); ++ fwnode_property_read_u32(np, "snps,multicast-filter-bins", ++ &plat->multicast_filter_bins); ++ fwnode_property_read_u32(np, "snps,perfect-filter-entries", ++ &plat->unicast_filter_entries); ++ plat->unicast_filter_entries = dwmac1000_validate_ucast_entries( ++ &pdev->dev, plat->unicast_filter_entries); ++ plat->multicast_filter_bins = dwmac1000_validate_mcast_bins( ++ &pdev->dev, plat->multicast_filter_bins); ++ plat->has_gmac = 1; ++ plat->pmt = 1; ++ ++ dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); ++ if (!dma_cfg) ++ return ERR_PTR(-ENOMEM); ++ plat->dma_cfg = dma_cfg; ++ ++ fwnode_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); ++ if (!dma_cfg->pbl) ++ dma_cfg->pbl = DEFAULT_DMA_PBL; ++ ++ fwnode_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl); ++ fwnode_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl); ++ 
dma_cfg->pblx8 = !fwnode_property_read_bool(np, "snps,no-pbl-x8"); ++ ++ dma_cfg->aal = fwnode_property_read_bool(np, "snps,aal"); ++ dma_cfg->fixed_burst = fwnode_property_read_bool(np, "snps,fixed-burst"); ++ dma_cfg->mixed_burst = fwnode_property_read_bool(np, "snps,mixed-burst"); ++ ++ plat->force_thresh_dma_mode = fwnode_property_read_bool(np, "snps,force_thresh_dma_mode"); ++ if (plat->force_thresh_dma_mode) ++ plat->force_sf_dma_mode = 0; ++ ++ fwnode_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed); ++ ++ plat->axi = stmmac_axi_setup_acpi(pdev); ++ ++ stmmac_mtl_setup_acpi(pdev, plat); ++ ++ stmmac_acpi_clock_setup(plat,pdev); ++ ++ return plat; ++} ++#else ++struct plat_stmmacenet_data * ++stmmac_probe_config_acpi(struct platform_device *pdev, u8 *mac) ++{ ++ return ERR_PTR(-EINVAL); ++} ++#endif /* CONFIG_ACPI */ ++EXPORT_SYMBOL_GPL(stmmac_probe_config_acpi); ++ + int stmmac_get_platform_resources(struct platform_device *pdev, + struct stmmac_resources *stmmac_res) + { +@@ -704,8 +950,14 @@ int stmmac_get_platform_resources(struct platform_device *pdev, + + /* Get IRQ information early to have an ability to ask for deferred + * probe if needed before we went too far with resource allocation. 
++ * For ACPI _byname does not work, so we have to trust, that the ++ * first interrupt is the right one + */ +- stmmac_res->irq = platform_get_irq_byname(pdev, "macirq"); ++ if (has_acpi_companion(&pdev->dev)) { ++ stmmac_res->irq = platform_get_irq(pdev, 0); ++ } else { ++ stmmac_res->irq = platform_get_irq_byname(pdev, "macirq"); ++ } + if (stmmac_res->irq < 0) + return stmmac_res->irq; + +@@ -723,6 +975,7 @@ int stmmac_get_platform_resources(struct platform_device *pdev, + return -EPROBE_DEFER; + dev_info(&pdev->dev, "IRQ eth_wake_irq not found\n"); + stmmac_res->wol_irq = stmmac_res->irq; ++ stmmac_res->lpi_irq = -1; + } + + stmmac_res->lpi_irq = +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +index 111111111111..222222222222 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +@@ -13,6 +13,8 @@ + + struct plat_stmmacenet_data * + devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); ++struct plat_stmmacenet_data * ++stmmac_probe_config_acpi(struct platform_device *pdev, u8 *mac); + + struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat, + const char *name); +-- +Armbian + diff --git a/patch/kernel/archive/uefi-arm64-6.19/1001-net-stmicro-stmmac-Phytium-adapt-to-net-stmmac-remove-axi_blen-array.patch b/patch/kernel/archive/uefi-arm64-6.19/1001-net-stmicro-stmmac-Phytium-adapt-to-net-stmmac-remove-axi_blen-array.patch new file mode 100644 index 000000000000..c39ec766123d --- /dev/null +++ b/patch/kernel/archive/uefi-arm64-6.19/1001-net-stmicro-stmmac-Phytium-adapt-to-net-stmmac-remove-axi_blen-array.patch @@ -0,0 +1,30 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Ricardo Pardini +Date: Mon, 29 Dec 2025 14:35:40 +0100 +Subject: net: stmicro: stmmac: Phytium: adapt to "net: stmmac: remove axi_blen + array" + +Signed-off-by: Ricardo Pardini 
+--- + drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +index 111111111111..222222222222 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +@@ -724,7 +724,10 @@ static struct stmmac_axi * stmmac_axi_setup_acpi(struct platform_device *pdev) + axi->axi_wr_osr_lmt = 1; + if (fwnode_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt)) + axi->axi_rd_osr_lmt = 1; +- fwnode_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN); ++ ++ u32 axi_blen[AXI_BLEN]; // adapt to "net: stmmac: remove axi_blen array" ++ fwnode_property_read_u32_array(np, "snps,blen", axi_blen, AXI_BLEN); ++ stmmac_axi_blen_to_mask(&axi->axi_blen_regval, axi_blen, AXI_BLEN); + + return axi; + } +-- +Armbian + diff --git a/patch/kernel/archive/uefi-arm64-6.19/1002-net-stmicro-stmmac-Phytium-adapt-to-net-stmmac-replace-has_xxxx-with-core_type.patch b/patch/kernel/archive/uefi-arm64-6.19/1002-net-stmicro-stmmac-Phytium-adapt-to-net-stmmac-replace-has_xxxx-with-core_type.patch new file mode 100644 index 000000000000..59f422c760d4 --- /dev/null +++ b/patch/kernel/archive/uefi-arm64-6.19/1002-net-stmicro-stmmac-Phytium-adapt-to-net-stmmac-replace-has_xxxx-with-core_type.patch @@ -0,0 +1,50 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Ricardo Pardini +Date: Mon, 29 Dec 2025 15:20:32 +0100 +Subject: net: stmicro: stmmac: Phytium: adapt to "net: stmmac: replace + has_xxxx with core_type" + +Signed-off-by: Ricardo Pardini +--- + drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c | 4 ++-- + drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 2 +- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c +index 111111111111..222222222222 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c +@@ -55,7 +55,7 @@ dwmac_phytium_parse_config_acpi(struct platform_device *pdev, const char *mac) + np = dev_fwnode(dev); + + plat->phy_interface = fwnode_get_phy_mode(np); +- ++ + + /* Get max speed of operation from properties */ + if (fwnode_property_read_u32(np, "max-speed", &plat->max_speed)) +@@ -96,7 +96,7 @@ dwmac_phytium_parse_config_acpi(struct platform_device *pdev, const char *mac) + plat->unicast_filter_entries = 1; + + fwnode_property_read_u32(np, "max-frame-size", &plat->maxmtu); +- plat->has_gmac = 1; ++ plat->core_type = DWMAC_CORE_GMAC; // adapt to "net: stmmac: replace has_xxxx with core_type" + plat->pmt = 1; + + dma_cfg = devm_kzalloc(dev, sizeof(*dma_cfg), GFP_KERNEL); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +index 111111111111..222222222222 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +@@ -903,7 +903,7 @@ stmmac_probe_config_acpi(struct platform_device *pdev, u8 *mac) + &pdev->dev, plat->unicast_filter_entries); + plat->multicast_filter_bins = dwmac1000_validate_mcast_bins( + &pdev->dev, plat->multicast_filter_bins); +- plat->has_gmac = 1; ++ plat->core_type = DWMAC_CORE_GMAC; // adapt to "net: stmmac: replace has_xxxx with core_type" + plat->pmt = 1; + + dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); +-- +Armbian + diff --git a/patch/kernel/archive/uefi-arm64-6.19/1003-net-stmicro-stmmac-Phytium-adapt-to-net-stmmac-pass-struct-device-to-init-exit-methods.patch b/patch/kernel/archive/uefi-arm64-6.19/1003-net-stmicro-stmmac-Phytium-adapt-to-net-stmmac-pass-struct-device-to-init-exit-methods.patch new file mode 100644 index 000000000000..d0fc24cdd616 
--- /dev/null +++ b/patch/kernel/archive/uefi-arm64-6.19/1003-net-stmicro-stmmac-Phytium-adapt-to-net-stmmac-pass-struct-device-to-init-exit-methods.patch @@ -0,0 +1,29 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Ricardo Pardini +Date: Mon, 29 Dec 2025 15:41:31 +0100 +Subject: net: stmicro: stmmac: Phytium: adapt to "net: stmmac: pass struct + device to init()/exit() methods" + +- ref https://github.com/torvalds/linux/commit/85081acc6b1188f2a6e5e605dc644225fcdf327f + +Signed-off-by: Ricardo Pardini +--- + drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c +index 111111111111..222222222222 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c +@@ -195,7 +195,7 @@ static int dwmac_phytium_probe(struct platform_device *pdev) + + err_exit: + if (plat_dat->exit) +- plat_dat->exit(pdev, plat_dat->bsp_priv); ++ plat_dat->exit(&pdev->dev, plat_dat->bsp_priv); + + return ret; + } +-- +Armbian + diff --git a/patch/kernel/archive/uefi-arm64-6.19/board-hikey960-usb.patch b/patch/kernel/archive/uefi-arm64-6.19/board-hikey960-usb.patch new file mode 100644 index 000000000000..2116e1566309 --- /dev/null +++ b/patch/kernel/archive/uefi-arm64-6.19/board-hikey960-usb.patch @@ -0,0 +1,77 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Kevin Schmidt +Date: Thu, 31 Aug 2023 11:41:14 +0200 +Subject: [ARCHEOLOGY] Add board: HiKey960 + +> X-Git-Archeology: - Revision d8200e5c383c1c77569596bfd2b8886ef8258c3f: https://github.com/armbian/build/commit/d8200e5c383c1c77569596bfd2b8886ef8258c3f +> X-Git-Archeology: Date: Thu, 31 Aug 2023 11:41:14 +0200 +> X-Git-Archeology: From: Kevin Schmidt +> X-Git-Archeology: Subject: Add board: HiKey960 +> X-Git-Archeology: +--- + 
arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts | 35 +++++++++- + 1 file changed, 33 insertions(+), 2 deletions(-) + +diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts +index 111111111111..222222222222 100644 +--- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts ++++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts +@@ -197,6 +197,37 @@ optee { + method = "smc"; + }; + }; ++ ++ usb_hub_vdd: usb_hub_vdd { ++ compatible = "regulator-fixed"; ++ regulator-name = "hub-vdd"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ gpio = <&gpio5 6 0>; ++ enable-active-high; ++ }; ++ ++ usb-hub { ++ compatible = "hisilicon,usbhub"; ++ typec-vbus-gpios = <&gpio25 2 GPIO_ACTIVE_HIGH>; ++ otg-switch-gpios = <&gpio25 6 GPIO_ACTIVE_HIGH>; ++ hub-vdd-supply = <&usb_hub_vdd>; ++ usb-role-switch; ++ ++ port { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ hikey_usb_ep0: endpoint@0 { ++ reg = <0>; ++ remote-endpoint = <&dwc3_role_switch>; ++ }; ++ hikey_usb_ep1: endpoint@1 { ++ reg = <1>; ++ remote-endpoint = <&rt1711h_ep>; ++ }; ++ }; ++ }; + }; + + /* +@@ -564,7 +595,7 @@ port { + + rt1711h_ep: endpoint@0 { + reg = <0>; +- remote-endpoint = <&dwc3_role_switch>; ++ remote-endpoint = <&hikey_usb_ep1>; + }; + }; + }; +@@ -686,7 +717,7 @@ port { + #size-cells = <0>; + dwc3_role_switch: endpoint@0 { + reg = <0>; +- remote-endpoint = <&rt1711h_ep>; ++ remote-endpoint = <&hikey_usb_ep0>; + }; + + dwc3_ss: endpoint@1 { +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/1001-Add-apple-bce-driver.patch b/patch/kernel/archive/uefi-x86-6.19/1001-Add-apple-bce-driver.patch new file mode 100644 index 000000000000..89df97945ae8 --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/1001-Add-apple-bce-driver.patch @@ -0,0 +1,5864 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Aditya Garg +Date: Sat, 25 Oct 2025 08:13:34 +0000 +Subject: Add 
apple-bce driver + +--- + drivers/staging/apple-bce/Makefile | 28 + + drivers/staging/apple-bce/apple_bce.c | 445 ++++++ + drivers/staging/apple-bce/apple_bce.h | 41 + + drivers/staging/apple-bce/audio/audio.c | 711 +++++++++ + drivers/staging/apple-bce/audio/audio.h | 125 ++ + drivers/staging/apple-bce/audio/description.h | 42 + + drivers/staging/apple-bce/audio/pcm.c | 308 ++++ + drivers/staging/apple-bce/audio/pcm.h | 16 + + drivers/staging/apple-bce/audio/protocol.c | 347 +++++ + drivers/staging/apple-bce/audio/protocol.h | 147 ++ + drivers/staging/apple-bce/audio/protocol_bce.c | 226 +++ + drivers/staging/apple-bce/audio/protocol_bce.h | 72 + + drivers/staging/apple-bce/mailbox.c | 155 ++ + drivers/staging/apple-bce/mailbox.h | 53 + + drivers/staging/apple-bce/queue.c | 415 +++++ + drivers/staging/apple-bce/queue.h | 177 +++ + drivers/staging/apple-bce/queue_dma.c | 220 +++ + drivers/staging/apple-bce/queue_dma.h | 50 + + drivers/staging/apple-bce/vhci/command.h | 204 +++ + drivers/staging/apple-bce/vhci/queue.c | 268 ++++ + drivers/staging/apple-bce/vhci/queue.h | 76 + + drivers/staging/apple-bce/vhci/transfer.c | 661 ++++++++ + drivers/staging/apple-bce/vhci/transfer.h | 73 + + drivers/staging/apple-bce/vhci/vhci.c | 763 ++++++++++ + drivers/staging/apple-bce/vhci/vhci.h | 52 + + 25 files changed, 5675 insertions(+) + +diff --git a/drivers/staging/apple-bce/Makefile b/drivers/staging/apple-bce/Makefile +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/Makefile +@@ -0,0 +1,28 @@ ++modname := apple-bce ++obj-m += $(modname).o ++ ++apple-bce-objs := apple_bce.o mailbox.o queue.o queue_dma.o vhci/vhci.o vhci/queue.o vhci/transfer.o audio/audio.o audio/protocol.o audio/protocol_bce.o audio/pcm.o ++ ++MY_CFLAGS += -DWITHOUT_NVME_PATCH ++#MY_CFLAGS += -g -DDEBUG ++ccflags-y += ${MY_CFLAGS} ++CC += ${MY_CFLAGS} ++ ++KVERSION := $(KERNELRELEASE) ++ifeq ($(origin KERNELRELEASE), undefined) ++KVERSION := $(shell 
uname -r) ++endif ++ ++KDIR := /lib/modules/$(KVERSION)/build ++PWD := $(shell pwd) ++ ++.PHONY: all ++ ++all: ++ $(MAKE) -C $(KDIR) M=$(PWD) modules ++ ++clean: ++ $(MAKE) -C $(KDIR) M=$(PWD) clean ++ ++install: ++ $(MAKE) -C $(KDIR) M=$(PWD) modules_install +diff --git a/drivers/staging/apple-bce/apple_bce.c b/drivers/staging/apple-bce/apple_bce.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/apple_bce.c +@@ -0,0 +1,445 @@ ++#include "apple_bce.h" ++#include ++#include ++#include "audio/audio.h" ++#include ++ ++static dev_t bce_chrdev; ++static struct class *bce_class; ++ ++struct apple_bce_device *global_bce; ++ ++static int bce_create_command_queues(struct apple_bce_device *bce); ++static void bce_free_command_queues(struct apple_bce_device *bce); ++static irqreturn_t bce_handle_mb_irq(int irq, void *dev); ++static irqreturn_t bce_handle_dma_irq(int irq, void *dev); ++static int bce_fw_version_handshake(struct apple_bce_device *bce); ++static int bce_register_command_queue(struct apple_bce_device *bce, struct bce_queue_memcfg *cfg, int is_sq); ++ ++static int apple_bce_probe(struct pci_dev *dev, const struct pci_device_id *id) ++{ ++ struct apple_bce_device *bce = NULL; ++ int status = 0; ++ int nvec; ++ ++ pr_info("apple-bce: capturing our device\n"); ++ ++ if (pci_enable_device(dev)) ++ return -ENODEV; ++ if (pci_request_regions(dev, "apple-bce")) { ++ status = -ENODEV; ++ goto fail; ++ } ++ pci_set_master(dev); ++ nvec = pci_alloc_irq_vectors(dev, 1, 8, PCI_IRQ_MSI); ++ if (nvec < 5) { ++ status = -EINVAL; ++ goto fail; ++ } ++ ++ bce = kzalloc(sizeof(struct apple_bce_device), GFP_KERNEL); ++ if (!bce) { ++ status = -ENOMEM; ++ goto fail; ++ } ++ ++ bce->pci = dev; ++ pci_set_drvdata(dev, bce); ++ ++ bce->devt = bce_chrdev; ++ bce->dev = device_create(bce_class, &dev->dev, bce->devt, NULL, "apple-bce"); ++ if (IS_ERR_OR_NULL(bce->dev)) { ++ status = PTR_ERR(bce_class); ++ goto fail; ++ } ++ ++ 
bce->reg_mem_mb = pci_iomap(dev, 4, 0); ++ bce->reg_mem_dma = pci_iomap(dev, 2, 0); ++ ++ if (IS_ERR_OR_NULL(bce->reg_mem_mb) || IS_ERR_OR_NULL(bce->reg_mem_dma)) { ++ dev_warn(&dev->dev, "apple-bce: Failed to pci_iomap required regions\n"); ++ goto fail; ++ } ++ ++ bce_mailbox_init(&bce->mbox, bce->reg_mem_mb); ++ bce_timestamp_init(&bce->timestamp, bce->reg_mem_mb); ++ ++ spin_lock_init(&bce->queues_lock); ++ ida_init(&bce->queue_ida); ++ ++ if ((status = pci_request_irq(dev, 0, bce_handle_mb_irq, NULL, dev, "bce_mbox"))) ++ goto fail; ++ if ((status = pci_request_irq(dev, 4, NULL, bce_handle_dma_irq, dev, "bce_dma"))) ++ goto fail_interrupt_0; ++ ++ if ((status = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(37)))) { ++ dev_warn(&dev->dev, "dma: Setting mask failed\n"); ++ goto fail_interrupt; ++ } ++ ++ /* Gets the function 0's interface. This is needed because Apple only accepts DMA on our function if function 0 ++ is a bus master, so we need to work around this. */ ++ bce->pci0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); ++#ifndef WITHOUT_NVME_PATCH ++ if ((status = pci_enable_device_mem(bce->pci0))) { ++ dev_warn(&dev->dev, "apple-bce: failed to enable function 0\n"); ++ goto fail_dev0; ++ } ++#endif ++ pci_set_master(bce->pci0); ++ ++ bce_timestamp_start(&bce->timestamp, true); ++ ++ if ((status = bce_fw_version_handshake(bce))) ++ goto fail_ts; ++ pr_info("apple-bce: handshake done\n"); ++ ++ if ((status = bce_create_command_queues(bce))) { ++ pr_info("apple-bce: Creating command queues failed\n"); ++ goto fail_ts; ++ } ++ ++ global_bce = bce; ++ ++ bce_vhci_create(bce, &bce->vhci); ++ ++ return 0; ++ ++fail_ts: ++ bce_timestamp_stop(&bce->timestamp); ++#ifndef WITHOUT_NVME_PATCH ++ pci_disable_device(bce->pci0); ++fail_dev0: ++#endif ++ pci_dev_put(bce->pci0); ++fail_interrupt: ++ pci_free_irq(dev, 4, dev); ++fail_interrupt_0: ++ pci_free_irq(dev, 0, dev); ++fail: ++ if (bce && bce->dev) { ++ device_destroy(bce_class, bce->devt); ++ 
++ if (!IS_ERR_OR_NULL(bce->reg_mem_mb)) ++ pci_iounmap(dev, bce->reg_mem_mb); ++ if (!IS_ERR_OR_NULL(bce->reg_mem_dma)) ++ pci_iounmap(dev, bce->reg_mem_dma); ++ ++ kfree(bce); ++ } ++ ++ pci_free_irq_vectors(dev); ++ pci_release_regions(dev); ++ pci_disable_device(dev); ++ ++ if (!status) ++ status = -EINVAL; ++ return status; ++} ++ ++static int bce_create_command_queues(struct apple_bce_device *bce) ++{ ++ int status; ++ struct bce_queue_memcfg *cfg; ++ ++ bce->cmd_cq = bce_alloc_cq(bce, 0, 0x20); ++ bce->cmd_cmdq = bce_alloc_cmdq(bce, 1, 0x20); ++ if (bce->cmd_cq == NULL || bce->cmd_cmdq == NULL) { ++ status = -ENOMEM; ++ goto err; ++ } ++ bce->queues[0] = (struct bce_queue *) bce->cmd_cq; ++ bce->queues[1] = (struct bce_queue *) bce->cmd_cmdq->sq; ++ ++ cfg = kzalloc(sizeof(struct bce_queue_memcfg), GFP_KERNEL); ++ if (!cfg) { ++ status = -ENOMEM; ++ goto err; ++ } ++ bce_get_cq_memcfg(bce->cmd_cq, cfg); ++ if ((status = bce_register_command_queue(bce, cfg, false))) ++ goto err; ++ bce_get_sq_memcfg(bce->cmd_cmdq->sq, bce->cmd_cq, cfg); ++ if ((status = bce_register_command_queue(bce, cfg, true))) ++ goto err; ++ kfree(cfg); ++ ++ return 0; ++ ++err: ++ if (bce->cmd_cq) ++ bce_free_cq(bce, bce->cmd_cq); ++ if (bce->cmd_cmdq) ++ bce_free_cmdq(bce, bce->cmd_cmdq); ++ return status; ++} ++ ++static void bce_free_command_queues(struct apple_bce_device *bce) ++{ ++ bce_free_cq(bce, bce->cmd_cq); ++ bce_free_cmdq(bce, bce->cmd_cmdq); ++ bce->cmd_cq = NULL; ++ bce->queues[0] = NULL; ++} ++ ++static irqreturn_t bce_handle_mb_irq(int irq, void *dev) ++{ ++ struct apple_bce_device *bce = pci_get_drvdata(dev); ++ bce_mailbox_handle_interrupt(&bce->mbox); ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t bce_handle_dma_irq(int irq, void *dev) ++{ ++ int i; ++ struct apple_bce_device *bce = pci_get_drvdata(dev); ++ spin_lock(&bce->queues_lock); ++ for (i = 0; i < BCE_MAX_QUEUE_COUNT; i++) ++ if (bce->queues[i] && bce->queues[i]->type == BCE_QUEUE_CQ) ++ 
bce_handle_cq_completions(bce, (struct bce_queue_cq *) bce->queues[i]); ++ spin_unlock(&bce->queues_lock); ++ return IRQ_HANDLED; ++} ++ ++static int bce_fw_version_handshake(struct apple_bce_device *bce) ++{ ++ u64 result; ++ int status; ++ ++ if ((status = bce_mailbox_send(&bce->mbox, BCE_MB_MSG(BCE_MB_SET_FW_PROTOCOL_VERSION, BC_PROTOCOL_VERSION), ++ &result))) ++ return status; ++ if (BCE_MB_TYPE(result) != BCE_MB_SET_FW_PROTOCOL_VERSION || ++ BCE_MB_VALUE(result) != BC_PROTOCOL_VERSION) { ++ pr_err("apple-bce: FW version handshake failed %x:%llx\n", BCE_MB_TYPE(result), BCE_MB_VALUE(result)); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static int bce_register_command_queue(struct apple_bce_device *bce, struct bce_queue_memcfg *cfg, int is_sq) ++{ ++ int status; ++ int cmd_type; ++ u64 result; ++ // OS X uses an bidirectional direction, but that's not really needed ++ dma_addr_t a = dma_map_single(&bce->pci->dev, cfg, sizeof(struct bce_queue_memcfg), DMA_TO_DEVICE); ++ if (dma_mapping_error(&bce->pci->dev, a)) ++ return -ENOMEM; ++ cmd_type = is_sq ? 
BCE_MB_REGISTER_COMMAND_SQ : BCE_MB_REGISTER_COMMAND_CQ; ++ status = bce_mailbox_send(&bce->mbox, BCE_MB_MSG(cmd_type, a), &result); ++ dma_unmap_single(&bce->pci->dev, a, sizeof(struct bce_queue_memcfg), DMA_TO_DEVICE); ++ if (status) ++ return status; ++ if (BCE_MB_TYPE(result) != BCE_MB_REGISTER_COMMAND_QUEUE_REPLY) ++ return -EINVAL; ++ return 0; ++} ++ ++static void apple_bce_remove(struct pci_dev *dev) ++{ ++ struct apple_bce_device *bce = pci_get_drvdata(dev); ++ bce->is_being_removed = true; ++ ++ bce_vhci_destroy(&bce->vhci); ++ ++ bce_timestamp_stop(&bce->timestamp); ++#ifndef WITHOUT_NVME_PATCH ++ pci_disable_device(bce->pci0); ++#endif ++ pci_dev_put(bce->pci0); ++ pci_free_irq(dev, 0, dev); ++ pci_free_irq(dev, 4, dev); ++ bce_free_command_queues(bce); ++ pci_iounmap(dev, bce->reg_mem_mb); ++ pci_iounmap(dev, bce->reg_mem_dma); ++ device_destroy(bce_class, bce->devt); ++ pci_free_irq_vectors(dev); ++ pci_release_regions(dev); ++ pci_disable_device(dev); ++ kfree(bce); ++} ++ ++static int bce_save_state_and_sleep(struct apple_bce_device *bce) ++{ ++ int attempt, status = 0; ++ u64 resp; ++ dma_addr_t dma_addr; ++ void *dma_ptr = NULL; ++ size_t size = max(PAGE_SIZE, 4096UL); ++ ++ for (attempt = 0; attempt < 5; ++attempt) { ++ pr_debug("apple-bce: suspend: attempt %i, buffer size %li\n", attempt, size); ++ dma_ptr = dma_alloc_coherent(&bce->pci->dev, size, &dma_addr, GFP_KERNEL); ++ if (!dma_ptr) { ++ pr_err("apple-bce: suspend failed (data alloc failed)\n"); ++ break; ++ } ++ BUG_ON((dma_addr % 4096) != 0); ++ status = bce_mailbox_send(&bce->mbox, ++ BCE_MB_MSG(BCE_MB_SAVE_STATE_AND_SLEEP, (dma_addr & ~(4096LLU - 1)) | (size / 4096)), &resp); ++ if (status) { ++ pr_err("apple-bce: suspend failed (mailbox send)\n"); ++ break; ++ } ++ if (BCE_MB_TYPE(resp) == BCE_MB_SAVE_RESTORE_STATE_COMPLETE) { ++ bce->saved_data_dma_addr = dma_addr; ++ bce->saved_data_dma_ptr = dma_ptr; ++ bce->saved_data_dma_size = size; ++ return 0; ++ } else if (BCE_MB_TYPE(resp) 
== BCE_MB_SAVE_STATE_AND_SLEEP_FAILURE) { ++ dma_free_coherent(&bce->pci->dev, size, dma_ptr, dma_addr); dma_ptr = NULL; ++ /* The 0x10ff magic value was extracted from Apple's driver */ ++ size = (BCE_MB_VALUE(resp) + 0x10ff) & ~(4096LLU - 1); ++ pr_debug("apple-bce: suspend: device requested a larger buffer (%li)\n", size); ++ continue; ++ } else { ++ pr_err("apple-bce: suspend failed (invalid device response)\n"); ++ status = -EINVAL; ++ break; ++ } ++ } ++ if (dma_ptr) ++ dma_free_coherent(&bce->pci->dev, size, dma_ptr, dma_addr); ++ if (!status) ++ return bce_mailbox_send(&bce->mbox, BCE_MB_MSG(BCE_MB_SLEEP_NO_STATE, 0), &resp); ++ return status; ++} ++ ++static int bce_restore_state_and_wake(struct apple_bce_device *bce) ++{ ++ int status; ++ u64 resp; ++ if (!bce->saved_data_dma_ptr) { ++ if ((status = bce_mailbox_send(&bce->mbox, BCE_MB_MSG(BCE_MB_RESTORE_NO_STATE, 0), &resp))) { ++ pr_err("apple-bce: resume with no state failed (mailbox send)\n"); ++ return status; ++ } ++ if (BCE_MB_TYPE(resp) != BCE_MB_RESTORE_NO_STATE) { ++ pr_err("apple-bce: resume with no state failed (invalid device response)\n"); ++ return -EINVAL; ++ } ++ return 0; ++ } ++ ++ if ((status = bce_mailbox_send(&bce->mbox, BCE_MB_MSG(BCE_MB_RESTORE_STATE_AND_WAKE, ++ (bce->saved_data_dma_addr & ~(4096LLU - 1)) | (bce->saved_data_dma_size / 4096)), &resp))) { ++ pr_err("apple-bce: resume with state failed (mailbox send)\n"); ++ goto finish_with_state; ++ } ++ if (BCE_MB_TYPE(resp) != BCE_MB_SAVE_RESTORE_STATE_COMPLETE) { ++ pr_err("apple-bce: resume with state failed (invalid device response)\n"); ++ status = -EINVAL; ++ goto finish_with_state; ++ } ++ ++finish_with_state: ++ dma_free_coherent(&bce->pci->dev, bce->saved_data_dma_size, bce->saved_data_dma_ptr, bce->saved_data_dma_addr); ++ bce->saved_data_dma_ptr = NULL; ++ return status; ++} ++ ++static int apple_bce_suspend(struct device *dev) ++{ ++ struct apple_bce_device *bce = pci_get_drvdata(to_pci_dev(dev)); ++ int status; ++ 
bce_timestamp_stop(&bce->timestamp); ++ ++ if ((status = bce_save_state_and_sleep(bce))) ++ return status; ++ ++ return 0; ++} ++ ++static int apple_bce_resume(struct device *dev) ++{ ++ struct apple_bce_device *bce = pci_get_drvdata(to_pci_dev(dev)); ++ int status; ++ ++ pci_set_master(bce->pci); ++ pci_set_master(bce->pci0); ++ ++ if ((status = bce_restore_state_and_wake(bce))) ++ return status; ++ ++ bce_timestamp_start(&bce->timestamp, false); ++ ++ return 0; ++} ++ ++static struct pci_device_id apple_bce_ids[ ] = { ++ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x1801) }, ++ { 0, }, ++}; ++ ++MODULE_DEVICE_TABLE(pci, apple_bce_ids); ++ ++struct dev_pm_ops apple_bce_pci_driver_pm = { ++ .suspend = apple_bce_suspend, ++ .resume = apple_bce_resume ++}; ++struct pci_driver apple_bce_pci_driver = { ++ .name = "apple-bce", ++ .id_table = apple_bce_ids, ++ .probe = apple_bce_probe, ++ .remove = apple_bce_remove, ++ .driver = { ++ .pm = &apple_bce_pci_driver_pm ++ } ++}; ++ ++ ++static int __init apple_bce_module_init(void) ++{ ++ int result; ++ if ((result = alloc_chrdev_region(&bce_chrdev, 0, 1, "apple-bce"))) ++ goto fail_chrdev; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,4,0) ++ bce_class = class_create(THIS_MODULE, "apple-bce"); ++#else ++ bce_class = class_create("apple-bce"); ++#endif ++ if (IS_ERR(bce_class)) { ++ result = PTR_ERR(bce_class); ++ goto fail_class; ++ } ++ if ((result = bce_vhci_module_init())) { ++ pr_err("apple-bce: bce-vhci init failed"); ++ goto fail_class; ++ } ++ ++ result = pci_register_driver(&apple_bce_pci_driver); ++ if (result) ++ goto fail_drv; ++ ++ aaudio_module_init(); ++ ++ return 0; ++ ++fail_drv: ++ pci_unregister_driver(&apple_bce_pci_driver); ++fail_class: ++ class_destroy(bce_class); ++fail_chrdev: ++ unregister_chrdev_region(bce_chrdev, 1); ++ if (!result) ++ result = -EINVAL; ++ return result; ++} ++static void __exit apple_bce_module_exit(void) ++{ ++ pci_unregister_driver(&apple_bce_pci_driver); ++ ++ aaudio_module_exit(); ++ 
bce_vhci_module_exit(); ++ class_destroy(bce_class); ++ unregister_chrdev_region(bce_chrdev, 1); ++} ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("MrARM"); ++MODULE_DESCRIPTION("Apple BCE Driver"); ++MODULE_VERSION("0.01"); ++module_init(apple_bce_module_init); ++module_exit(apple_bce_module_exit); +diff --git a/drivers/staging/apple-bce/apple_bce.h b/drivers/staging/apple-bce/apple_bce.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/apple_bce.h +@@ -0,0 +1,41 @@ ++#ifndef APPLE_BCE_H ++#define APPLE_BCE_H ++ ++#include ++#include ++#include "mailbox.h" ++#include "queue.h" ++#include "vhci/vhci.h" ++ ++#define BC_PROTOCOL_VERSION 0x20001 ++#define BCE_MAX_QUEUE_COUNT 0x100 ++ ++#define BCE_QUEUE_USER_MIN 2 ++#define BCE_QUEUE_USER_MAX (BCE_MAX_QUEUE_COUNT - 1) ++ ++struct apple_bce_device { ++ struct pci_dev *pci, *pci0; ++ dev_t devt; ++ struct device *dev; ++ void __iomem *reg_mem_mb; ++ void __iomem *reg_mem_dma; ++ struct bce_mailbox mbox; ++ struct bce_timestamp timestamp; ++ struct bce_queue *queues[BCE_MAX_QUEUE_COUNT]; ++ struct spinlock queues_lock; ++ struct ida queue_ida; ++ struct bce_queue_cq *cmd_cq; ++ struct bce_queue_cmdq *cmd_cmdq; ++ struct bce_queue_sq *int_sq_list[BCE_MAX_QUEUE_COUNT]; ++ bool is_being_removed; ++ ++ dma_addr_t saved_data_dma_addr; ++ void *saved_data_dma_ptr; ++ size_t saved_data_dma_size; ++ ++ struct bce_vhci vhci; ++}; ++ ++extern struct apple_bce_device *global_bce; ++ ++#endif //APPLE_BCE_H +diff --git a/drivers/staging/apple-bce/audio/audio.c b/drivers/staging/apple-bce/audio/audio.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/audio.c +@@ -0,0 +1,711 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "audio.h" ++#include "pcm.h" ++#include ++ ++static int aaudio_alsa_index = SNDRV_DEFAULT_IDX1; ++static char *aaudio_alsa_id = SNDRV_DEFAULT_STR1; ++ 
++static dev_t aaudio_chrdev; ++static struct class *aaudio_class; ++ ++static int aaudio_init_cmd(struct aaudio_device *a); ++static int aaudio_init_bs(struct aaudio_device *a); ++static void aaudio_init_dev(struct aaudio_device *a, aaudio_device_id_t dev_id); ++static void aaudio_free_dev(struct aaudio_subdevice *sdev); ++ ++static int aaudio_probe(struct pci_dev *dev, const struct pci_device_id *id) ++{ ++ struct aaudio_device *aaudio = NULL; ++ struct aaudio_subdevice *sdev = NULL; ++ int status = 0; ++ u32 cfg; ++ ++ pr_info("aaudio: capturing our device\n"); ++ ++ if (pci_enable_device(dev)) ++ return -ENODEV; ++ if (pci_request_regions(dev, "aaudio")) { ++ status = -ENODEV; ++ goto fail; ++ } ++ pci_set_master(dev); ++ ++ aaudio = kzalloc(sizeof(struct aaudio_device), GFP_KERNEL); ++ if (!aaudio) { ++ status = -ENOMEM; ++ goto fail; ++ } ++ ++ aaudio->bce = global_bce; ++ if (!aaudio->bce) { ++ dev_warn(&dev->dev, "aaudio: No BCE available\n"); ++ status = -EINVAL; ++ goto fail; ++ } ++ ++ aaudio->pci = dev; ++ pci_set_drvdata(dev, aaudio); ++ ++ aaudio->devt = aaudio_chrdev; ++ aaudio->dev = device_create(aaudio_class, &dev->dev, aaudio->devt, NULL, "aaudio"); ++ if (IS_ERR_OR_NULL(aaudio->dev)) { ++ status = PTR_ERR(aaudio->dev); ++ goto fail; ++ } ++ device_link_add(aaudio->dev, aaudio->bce->dev, DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER); ++ ++ init_completion(&aaudio->remote_alive); ++ INIT_LIST_HEAD(&aaudio->subdevice_list); ++ ++ /* Init: set an unknown flag in the bitset */ ++ if (pci_read_config_dword(dev, 4, &cfg)) ++ dev_warn(&dev->dev, "aaudio: pci_read_config_dword fail\n"); ++ if (pci_write_config_dword(dev, 4, cfg | 6u)) ++ dev_warn(&dev->dev, "aaudio: pci_write_config_dword fail\n"); ++ ++ dev_info(aaudio->dev, "aaudio: bs len = %llx\n", pci_resource_len(dev, 0)); ++ aaudio->reg_mem_bs_dma = pci_resource_start(dev, 0); ++ aaudio->reg_mem_bs = pci_iomap(dev, 0, 0); ++ aaudio->reg_mem_cfg = pci_iomap(dev, 4, 0); ++ ++ 
aaudio->reg_mem_gpr = (u32 __iomem *) ((u8 __iomem *) aaudio->reg_mem_cfg + 0xC000); ++ ++ if (IS_ERR_OR_NULL(aaudio->reg_mem_bs) || IS_ERR_OR_NULL(aaudio->reg_mem_cfg)) { ++ dev_warn(&dev->dev, "aaudio: Failed to pci_iomap required regions\n"); ++ goto fail; ++ } ++ ++ if (aaudio_bce_init(aaudio)) { ++ dev_warn(&dev->dev, "aaudio: Failed to init BCE command transport\n"); ++ goto fail; ++ } ++ ++ if (snd_card_new(aaudio->dev, aaudio_alsa_index, aaudio_alsa_id, THIS_MODULE, 0, &aaudio->card)) { ++ dev_err(&dev->dev, "aaudio: Failed to create ALSA card\n"); ++ goto fail; ++ } ++ ++ strcpy(aaudio->card->shortname, "Apple T2 Audio"); ++ strcpy(aaudio->card->longname, "Apple T2 Audio"); ++ strcpy(aaudio->card->mixername, "Apple T2 Audio"); ++ /* Dynamic alsa ids start at 100 */ ++ aaudio->next_alsa_id = 100; ++ ++ if (aaudio_init_cmd(aaudio)) { ++ dev_err(&dev->dev, "aaudio: Failed to initialize over BCE\n"); ++ goto fail_snd; ++ } ++ ++ if (aaudio_init_bs(aaudio)) { ++ dev_err(&dev->dev, "aaudio: Failed to initialize BufferStruct\n"); ++ goto fail_snd; ++ } ++ ++ if ((status = aaudio_cmd_set_remote_access(aaudio, AAUDIO_REMOTE_ACCESS_ON))) { ++ dev_err(&dev->dev, "Failed to set remote access\n"); ++ return status; ++ } ++ ++ if (snd_card_register(aaudio->card)) { ++ dev_err(&dev->dev, "aaudio: Failed to register ALSA sound device\n"); ++ goto fail_snd; ++ } ++ ++ list_for_each_entry(sdev, &aaudio->subdevice_list, list) { ++ struct aaudio_buffer_struct_device *dev = &aaudio->bs->devices[sdev->buf_id]; ++ ++ if (sdev->out_stream_cnt == 1 && !strcmp(dev->name, "Speaker")) { ++ struct snd_pcm_hardware *hw = sdev->out_streams[0].alsa_hw_desc; ++ ++ snprintf(aaudio->card->driver, sizeof(aaudio->card->driver) / sizeof(char), "AppleT2x%d", hw->channels_min); ++ } ++ } ++ ++ return 0; ++ ++fail_snd: ++ snd_card_free(aaudio->card); ++fail: ++ if (aaudio && aaudio->dev) ++ device_destroy(aaudio_class, aaudio->devt); ++ kfree(aaudio); ++ ++ if 
(!IS_ERR_OR_NULL(aaudio->reg_mem_bs)) ++ pci_iounmap(dev, aaudio->reg_mem_bs); ++ if (!IS_ERR_OR_NULL(aaudio->reg_mem_cfg)) ++ pci_iounmap(dev, aaudio->reg_mem_cfg); ++ ++ pci_release_regions(dev); ++ pci_disable_device(dev); ++ ++ if (!status) ++ status = -EINVAL; ++ return status; ++} ++ ++ ++ ++static void aaudio_remove(struct pci_dev *dev) ++{ ++ struct aaudio_subdevice *sdev; ++ struct aaudio_device *aaudio = pci_get_drvdata(dev); ++ ++ snd_card_free(aaudio->card); ++ while (!list_empty(&aaudio->subdevice_list)) { ++ sdev = list_first_entry(&aaudio->subdevice_list, struct aaudio_subdevice, list); ++ list_del(&sdev->list); ++ aaudio_free_dev(sdev); ++ } ++ pci_iounmap(dev, aaudio->reg_mem_bs); ++ pci_iounmap(dev, aaudio->reg_mem_cfg); ++ device_destroy(aaudio_class, aaudio->devt); ++ pci_free_irq_vectors(dev); ++ pci_release_regions(dev); ++ pci_disable_device(dev); ++ kfree(aaudio); ++} ++ ++static int aaudio_suspend(struct device *dev) ++{ ++ struct aaudio_device *aaudio = pci_get_drvdata(to_pci_dev(dev)); ++ ++ if (aaudio_cmd_set_remote_access(aaudio, AAUDIO_REMOTE_ACCESS_OFF)) ++ dev_warn(aaudio->dev, "Failed to reset remote access\n"); ++ ++ pci_disable_device(aaudio->pci); ++ return 0; ++} ++ ++static int aaudio_resume(struct device *dev) ++{ ++ int status; ++ struct aaudio_device *aaudio = pci_get_drvdata(to_pci_dev(dev)); ++ ++ if ((status = pci_enable_device(aaudio->pci))) ++ return status; ++ pci_set_master(aaudio->pci); ++ ++ if ((status = aaudio_cmd_set_remote_access(aaudio, AAUDIO_REMOTE_ACCESS_ON))) { ++ dev_err(aaudio->dev, "Failed to set remote access\n"); ++ return status; ++ } ++ ++ return 0; ++} ++ ++static int aaudio_init_cmd(struct aaudio_device *a) ++{ ++ int status; ++ struct aaudio_send_ctx sctx; ++ struct aaudio_msg buf; ++ u64 dev_cnt, dev_i; ++ aaudio_device_id_t *dev_l; ++ ++ if ((status = aaudio_send(a, &sctx, 500, ++ aaudio_msg_write_alive_notification, 1, 3))) { ++ dev_err(a->dev, "Sending alive notification failed\n"); ++ return 
status; ++ } ++ ++ if (wait_for_completion_timeout(&a->remote_alive, msecs_to_jiffies(500)) == 0) { ++ dev_err(a->dev, "Timed out waiting for remote\n"); ++ return -ETIMEDOUT; ++ } ++ dev_info(a->dev, "Continuing init\n"); ++ ++ buf = aaudio_reply_alloc(); ++ if ((status = aaudio_cmd_get_device_list(a, &buf, &dev_l, &dev_cnt))) { ++ dev_err(a->dev, "Failed to get device list\n"); ++ aaudio_reply_free(&buf); ++ return status; ++ } ++ for (dev_i = 0; dev_i < dev_cnt; ++dev_i) ++ aaudio_init_dev(a, dev_l[dev_i]); ++ aaudio_reply_free(&buf); ++ ++ return 0; ++} ++ ++static void aaudio_init_stream_info(struct aaudio_subdevice *sdev, struct aaudio_stream *strm); ++static void aaudio_handle_jack_connection_change(struct aaudio_subdevice *sdev); ++ ++static void aaudio_init_dev(struct aaudio_device *a, aaudio_device_id_t dev_id) ++{ ++ struct aaudio_subdevice *sdev; ++ struct aaudio_msg buf = aaudio_reply_alloc(); ++ u64 uid_len, stream_cnt, i; ++ aaudio_object_id_t *stream_list; ++ char *uid; ++ ++ sdev = kzalloc(sizeof(struct aaudio_subdevice), GFP_KERNEL); ++ ++ if (aaudio_cmd_get_property(a, &buf, dev_id, dev_id, AAUDIO_PROP(AAUDIO_PROP_SCOPE_GLOBAL, AAUDIO_PROP_UID, 0), ++ NULL, 0, (void **) &uid, &uid_len) || uid_len > AAUDIO_DEVICE_MAX_UID_LEN) { ++ dev_err(a->dev, "Failed to get device uid for device %llx\n", dev_id); ++ goto fail; ++ } ++ dev_info(a->dev, "Remote device %llx %.*s\n", dev_id, (int) uid_len, uid); ++ ++ sdev->a = a; ++ INIT_LIST_HEAD(&sdev->list); ++ sdev->dev_id = dev_id; ++ sdev->buf_id = AAUDIO_BUFFER_ID_NONE; ++ strncpy(sdev->uid, uid, uid_len); ++ sdev->uid[uid_len + 1] = '\0'; ++ ++ if (aaudio_cmd_get_primitive_property(a, dev_id, dev_id, ++ AAUDIO_PROP(AAUDIO_PROP_SCOPE_INPUT, AAUDIO_PROP_LATENCY, 0), NULL, 0, &sdev->in_latency, sizeof(u32))) ++ dev_warn(a->dev, "Failed to query device input latency\n"); ++ if (aaudio_cmd_get_primitive_property(a, dev_id, dev_id, ++ AAUDIO_PROP(AAUDIO_PROP_SCOPE_OUTPUT, AAUDIO_PROP_LATENCY, 0), NULL, 0, 
&sdev->out_latency, sizeof(u32))) ++ dev_warn(a->dev, "Failed to query device output latency\n"); ++ ++ if (aaudio_cmd_get_input_stream_list(a, &buf, dev_id, &stream_list, &stream_cnt)) { ++ dev_err(a->dev, "Failed to get input stream list for device %llx\n", dev_id); ++ goto fail; ++ } ++ if (stream_cnt > AAUDIO_DEIVCE_MAX_INPUT_STREAMS) { ++ dev_warn(a->dev, "Device %s input stream count %llu is larger than the supported count of %u\n", ++ sdev->uid, stream_cnt, AAUDIO_DEIVCE_MAX_INPUT_STREAMS); ++ stream_cnt = AAUDIO_DEIVCE_MAX_INPUT_STREAMS; ++ } ++ sdev->in_stream_cnt = stream_cnt; ++ for (i = 0; i < stream_cnt; i++) { ++ sdev->in_streams[i].id = stream_list[i]; ++ sdev->in_streams[i].buffer_cnt = 0; ++ aaudio_init_stream_info(sdev, &sdev->in_streams[i]); ++ sdev->in_streams[i].latency += sdev->in_latency; ++ } ++ ++ if (aaudio_cmd_get_output_stream_list(a, &buf, dev_id, &stream_list, &stream_cnt)) { ++ dev_err(a->dev, "Failed to get output stream list for device %llx\n", dev_id); ++ goto fail; ++ } ++ if (stream_cnt > AAUDIO_DEIVCE_MAX_OUTPUT_STREAMS) { ++ dev_warn(a->dev, "Device %s output stream count %llu is larger than the supported count of %u\n", ++ sdev->uid, stream_cnt, AAUDIO_DEIVCE_MAX_OUTPUT_STREAMS); ++ stream_cnt = AAUDIO_DEIVCE_MAX_OUTPUT_STREAMS; ++ } ++ sdev->out_stream_cnt = stream_cnt; ++ for (i = 0; i < stream_cnt; i++) { ++ sdev->out_streams[i].id = stream_list[i]; ++ sdev->out_streams[i].buffer_cnt = 0; ++ aaudio_init_stream_info(sdev, &sdev->out_streams[i]); ++ sdev->out_streams[i].latency += sdev->out_latency; ++ } ++ ++ if (sdev->is_pcm) ++ aaudio_create_pcm(sdev); ++ /* Headphone Jack status */ ++ if (!strcmp(sdev->uid, "Codec Output")) { ++ if (snd_jack_new(a->card, sdev->uid, SND_JACK_HEADPHONE, &sdev->jack, true, false)) ++ dev_warn(a->dev, "Failed to create an attached jack for %s\n", sdev->uid); ++ aaudio_cmd_property_listener(a, sdev->dev_id, sdev->dev_id, ++ AAUDIO_PROP(AAUDIO_PROP_SCOPE_OUTPUT, AAUDIO_PROP_JACK_PLUGGED, 0)); ++ 
aaudio_handle_jack_connection_change(sdev); ++ } ++ ++ aaudio_reply_free(&buf); ++ ++ list_add_tail(&sdev->list, &a->subdevice_list); ++ return; ++ ++fail: ++ aaudio_reply_free(&buf); ++ kfree(sdev); ++} ++ ++static void aaudio_init_stream_info(struct aaudio_subdevice *sdev, struct aaudio_stream *strm) ++{ ++ if (aaudio_cmd_get_primitive_property(sdev->a, sdev->dev_id, strm->id, ++ AAUDIO_PROP(AAUDIO_PROP_SCOPE_GLOBAL, AAUDIO_PROP_PHYS_FORMAT, 0), NULL, 0, ++ &strm->desc, sizeof(strm->desc))) ++ dev_warn(sdev->a->dev, "Failed to query stream descriptor\n"); ++ if (aaudio_cmd_get_primitive_property(sdev->a, sdev->dev_id, strm->id, ++ AAUDIO_PROP(AAUDIO_PROP_SCOPE_GLOBAL, AAUDIO_PROP_LATENCY, 0), NULL, 0, &strm->latency, sizeof(u32))) ++ dev_warn(sdev->a->dev, "Failed to query stream latency\n"); ++ if (strm->desc.format_id == AAUDIO_FORMAT_LPCM) ++ sdev->is_pcm = true; ++} ++ ++static void aaudio_free_dev(struct aaudio_subdevice *sdev) ++{ ++ size_t i; ++ for (i = 0; i < sdev->in_stream_cnt; i++) { ++ if (sdev->in_streams[i].alsa_hw_desc) ++ kfree(sdev->in_streams[i].alsa_hw_desc); ++ if (sdev->in_streams[i].buffers) ++ kfree(sdev->in_streams[i].buffers); ++ } ++ for (i = 0; i < sdev->out_stream_cnt; i++) { ++ if (sdev->out_streams[i].alsa_hw_desc) ++ kfree(sdev->out_streams[i].alsa_hw_desc); ++ if (sdev->out_streams[i].buffers) ++ kfree(sdev->out_streams[i].buffers); ++ } ++ kfree(sdev); ++} ++ ++static struct aaudio_subdevice *aaudio_find_dev_by_dev_id(struct aaudio_device *a, aaudio_device_id_t dev_id) ++{ ++ struct aaudio_subdevice *sdev; ++ list_for_each_entry(sdev, &a->subdevice_list, list) { ++ if (dev_id == sdev->dev_id) ++ return sdev; ++ } ++ return NULL; ++} ++ ++static struct aaudio_subdevice *aaudio_find_dev_by_uid(struct aaudio_device *a, const char *uid) ++{ ++ struct aaudio_subdevice *sdev; ++ list_for_each_entry(sdev, &a->subdevice_list, list) { ++ if (!strcmp(uid, sdev->uid)) ++ return sdev; ++ } ++ return NULL; ++} ++ ++static void 
aaudio_init_bs_stream(struct aaudio_device *a, struct aaudio_stream *strm, ++ struct aaudio_buffer_struct_stream *bs_strm); ++static void aaudio_init_bs_stream_host(struct aaudio_device *a, struct aaudio_stream *strm, ++ struct aaudio_buffer_struct_stream *bs_strm); ++ ++static int aaudio_init_bs(struct aaudio_device *a) ++{ ++ int i, j; ++ struct aaudio_buffer_struct_device *dev; ++ struct aaudio_subdevice *sdev; ++ u32 ver, sig, bs_base; ++ ++ ver = ioread32(&a->reg_mem_gpr[0]); ++ if (ver < 3) { ++ dev_err(a->dev, "aaudio: Bad GPR version (%u)", ver); ++ return -EINVAL; ++ } ++ sig = ioread32(&a->reg_mem_gpr[1]); ++ if (sig != AAUDIO_SIG) { ++ dev_err(a->dev, "aaudio: Bad GPR sig (%x)", sig); ++ return -EINVAL; ++ } ++ bs_base = ioread32(&a->reg_mem_gpr[2]); ++ a->bs = (struct aaudio_buffer_struct *) ((u8 *) a->reg_mem_bs + bs_base); ++ if (a->bs->signature != AAUDIO_SIG) { ++ dev_err(a->dev, "aaudio: Bad BufferStruct sig (%x)", a->bs->signature); ++ return -EINVAL; ++ } ++ dev_info(a->dev, "aaudio: BufferStruct ver = %i\n", a->bs->version); ++ dev_info(a->dev, "aaudio: Num devices = %i\n", a->bs->num_devices); ++ for (i = 0; i < a->bs->num_devices; i++) { ++ dev = &a->bs->devices[i]; ++ dev_info(a->dev, "aaudio: Device %i %s\n", i, dev->name); ++ ++ sdev = aaudio_find_dev_by_uid(a, dev->name); ++ if (!sdev) { ++ dev_err(a->dev, "aaudio: Subdevice not found for BufferStruct device %s\n", dev->name); ++ continue; ++ } ++ sdev->buf_id = (u8) i; ++ dev->num_input_streams = 0; ++ for (j = 0; j < dev->num_output_streams; j++) { ++ dev_info(a->dev, "aaudio: Device %i Stream %i: Output; Buffer Count = %i\n", i, j, ++ dev->output_streams[j].num_buffers); ++ if (j < sdev->out_stream_cnt) ++ aaudio_init_bs_stream(a, &sdev->out_streams[j], &dev->output_streams[j]); ++ } ++ } ++ ++ list_for_each_entry(sdev, &a->subdevice_list, list) { ++ if (sdev->buf_id != AAUDIO_BUFFER_ID_NONE) ++ continue; ++ sdev->buf_id = i; ++ dev_info(a->dev, "aaudio: Created device %i %s\n", i, 
sdev->uid); ++ strcpy(a->bs->devices[i].name, sdev->uid); ++ a->bs->devices[i].num_input_streams = 0; ++ a->bs->devices[i].num_output_streams = 0; ++ a->bs->num_devices = ++i; ++ } ++ list_for_each_entry(sdev, &a->subdevice_list, list) { ++ if (sdev->in_stream_cnt == 1) { ++ dev_info(a->dev, "aaudio: Device %i Host Stream; Input\n", sdev->buf_id); ++ aaudio_init_bs_stream_host(a, &sdev->in_streams[0], &a->bs->devices[sdev->buf_id].input_streams[0]); ++ a->bs->devices[sdev->buf_id].num_input_streams = 1; ++ wmb(); ++ ++ if (aaudio_cmd_set_input_stream_address_ranges(a, sdev->dev_id)) { ++ dev_err(a->dev, "aaudio: Failed to set input stream address ranges\n"); ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++static void aaudio_init_bs_stream(struct aaudio_device *a, struct aaudio_stream *strm, ++ struct aaudio_buffer_struct_stream *bs_strm) ++{ ++ size_t i; ++ strm->buffer_cnt = bs_strm->num_buffers; ++ if (bs_strm->num_buffers > AAUDIO_DEIVCE_MAX_BUFFER_COUNT) { ++ dev_warn(a->dev, "BufferStruct buffer count %u exceeds driver limit of %u\n", bs_strm->num_buffers, ++ AAUDIO_DEIVCE_MAX_BUFFER_COUNT); ++ strm->buffer_cnt = AAUDIO_DEIVCE_MAX_BUFFER_COUNT; ++ } ++ if (!strm->buffer_cnt) ++ return; ++ strm->buffers = kmalloc_array(strm->buffer_cnt, sizeof(struct aaudio_dma_buf), GFP_KERNEL); ++ if (!strm->buffers) { ++ dev_err(a->dev, "Buffer list allocation failed\n"); ++ return; ++ } ++ for (i = 0; i < strm->buffer_cnt; i++) { ++ strm->buffers[i].dma_addr = a->reg_mem_bs_dma + (dma_addr_t) bs_strm->buffers[i].address; ++ strm->buffers[i].ptr = a->reg_mem_bs + bs_strm->buffers[i].address; ++ strm->buffers[i].size = bs_strm->buffers[i].size; ++ } ++ ++ if (strm->buffer_cnt == 1) { ++ strm->alsa_hw_desc = kmalloc(sizeof(struct snd_pcm_hardware), GFP_KERNEL); ++ if (aaudio_create_hw_info(&strm->desc, strm->alsa_hw_desc, strm->buffers[0].size)) { ++ kfree(strm->alsa_hw_desc); ++ strm->alsa_hw_desc = NULL; ++ } ++ } ++} ++ ++static void aaudio_init_bs_stream_host(struct aaudio_device 
*a, struct aaudio_stream *strm, ++ struct aaudio_buffer_struct_stream *bs_strm) ++{ ++ size_t size; ++ dma_addr_t dma_addr; ++ void *dma_ptr; ++ size = strm->desc.bytes_per_packet * 16640; ++ dma_ptr = dma_alloc_coherent(&a->pci->dev, size, &dma_addr, GFP_KERNEL); ++ if (!dma_ptr) { ++ dev_err(a->dev, "dma_alloc_coherent failed\n"); ++ return; ++ } ++ bs_strm->buffers[0].address = dma_addr; ++ bs_strm->buffers[0].size = size; ++ bs_strm->num_buffers = 1; ++ ++ memset(dma_ptr, 0, size); ++ ++ strm->buffer_cnt = 1; ++ strm->buffers = kmalloc_array(strm->buffer_cnt, sizeof(struct aaudio_dma_buf), GFP_KERNEL); ++ if (!strm->buffers) { ++ dev_err(a->dev, "Buffer list allocation failed\n"); ++ return; ++ } ++ strm->buffers[0].dma_addr = dma_addr; ++ strm->buffers[0].ptr = dma_ptr; ++ strm->buffers[0].size = size; ++ ++ strm->alsa_hw_desc = kmalloc(sizeof(struct snd_pcm_hardware), GFP_KERNEL); ++ if (aaudio_create_hw_info(&strm->desc, strm->alsa_hw_desc, strm->buffers[0].size)) { ++ kfree(strm->alsa_hw_desc); ++ strm->alsa_hw_desc = NULL; ++ } ++} ++ ++static void aaudio_handle_prop_change(struct aaudio_device *a, struct aaudio_msg *msg); ++ ++void aaudio_handle_notification(struct aaudio_device *a, struct aaudio_msg *msg) ++{ ++ struct aaudio_send_ctx sctx; ++ struct aaudio_msg_base base; ++ if (aaudio_msg_read_base(msg, &base)) ++ return; ++ switch (base.msg) { ++ case AAUDIO_MSG_NOTIFICATION_BOOT: ++ dev_info(a->dev, "Received boot notification from remote\n"); ++ ++ /* Resend the alive notify */ ++ if (aaudio_send(a, &sctx, 500, ++ aaudio_msg_write_alive_notification, 1, 3)) { ++ pr_err("Sending alive notification failed\n"); ++ } ++ break; ++ case AAUDIO_MSG_NOTIFICATION_ALIVE: ++ dev_info(a->dev, "Received alive notification from remote\n"); ++ complete_all(&a->remote_alive); ++ break; ++ case AAUDIO_MSG_PROPERTY_CHANGED: ++ aaudio_handle_prop_change(a, msg); ++ break; ++ default: ++ dev_info(a->dev, "Unhandled notification %i", base.msg); ++ break; ++ } ++} ++ 
++struct aaudio_prop_change_work_struct { ++ struct work_struct ws; ++ struct aaudio_device *a; ++ aaudio_device_id_t dev; ++ aaudio_object_id_t obj; ++ struct aaudio_prop_addr prop; ++}; ++ ++static void aaudio_handle_jack_connection_change(struct aaudio_subdevice *sdev) ++{ ++ u32 plugged; ++ if (!sdev->jack) ++ return; ++ /* NOTE: Apple made the plug status scoped to the input and output streams. This makes no sense for us, so I just ++ * always pick the OUTPUT status. */ ++ if (aaudio_cmd_get_primitive_property(sdev->a, sdev->dev_id, sdev->dev_id, ++ AAUDIO_PROP(AAUDIO_PROP_SCOPE_OUTPUT, AAUDIO_PROP_JACK_PLUGGED, 0), NULL, 0, &plugged, sizeof(plugged))) { ++ dev_err(sdev->a->dev, "Failed to get jack enable status\n"); ++ return; ++ } ++ dev_dbg(sdev->a->dev, "Jack is now %s\n", plugged ? "plugged" : "unplugged"); ++ snd_jack_report(sdev->jack, plugged ? sdev->jack->type : 0); ++} ++ ++void aaudio_handle_prop_change_work(struct work_struct *ws) ++{ ++ struct aaudio_prop_change_work_struct *work = container_of(ws, struct aaudio_prop_change_work_struct, ws); ++ struct aaudio_subdevice *sdev; ++ ++ sdev = aaudio_find_dev_by_dev_id(work->a, work->dev); ++ if (!sdev) { ++ dev_err(work->a->dev, "Property notification change: device not found\n"); ++ goto done; ++ } ++ dev_dbg(work->a->dev, "Property changed for device: %s\n", sdev->uid); ++ ++ if (work->prop.scope == AAUDIO_PROP_SCOPE_OUTPUT && work->prop.selector == AAUDIO_PROP_JACK_PLUGGED) { ++ aaudio_handle_jack_connection_change(sdev); ++ } ++ ++done: ++ kfree(work); ++} ++ ++void aaudio_handle_prop_change(struct aaudio_device *a, struct aaudio_msg *msg) ++{ ++ /* NOTE: This is a scheduled work because this callback will generally need to query device information and this ++ * is not possible when we are in the reply parsing code's context. 
*/ ++ struct aaudio_prop_change_work_struct *work; ++ work = kmalloc(sizeof(struct aaudio_prop_change_work_struct), GFP_KERNEL); ++ work->a = a; ++ INIT_WORK(&work->ws, aaudio_handle_prop_change_work); ++ aaudio_msg_read_property_changed(msg, &work->dev, &work->obj, &work->prop); ++ schedule_work(&work->ws); ++} ++ ++#define aaudio_send_cmd_response(a, sctx, msg, fn, ...) \ ++ if (aaudio_send_with_tag(a, sctx, ((struct aaudio_msg_header *) msg->data)->tag, 500, fn, ##__VA_ARGS__)) \ ++ pr_err("aaudio: Failed to reply to a command\n"); ++ ++void aaudio_handle_cmd_timestamp(struct aaudio_device *a, struct aaudio_msg *msg) ++{ ++ ktime_t time_os = ktime_get_boottime(); ++ struct aaudio_send_ctx sctx; ++ struct aaudio_subdevice *sdev; ++ u64 devid, timestamp, update_seed; ++ aaudio_msg_read_update_timestamp(msg, &devid, ×tamp, &update_seed); ++ dev_dbg(a->dev, "Received timestamp update for dev=%llx ts=%llx seed=%llx\n", devid, timestamp, update_seed); ++ ++ sdev = aaudio_find_dev_by_dev_id(a, devid); ++ aaudio_handle_timestamp(sdev, time_os, timestamp); ++ ++ aaudio_send_cmd_response(a, &sctx, msg, ++ aaudio_msg_write_update_timestamp_response); ++} ++ ++void aaudio_handle_command(struct aaudio_device *a, struct aaudio_msg *msg) ++{ ++ struct aaudio_msg_base base; ++ if (aaudio_msg_read_base(msg, &base)) ++ return; ++ switch (base.msg) { ++ case AAUDIO_MSG_UPDATE_TIMESTAMP: ++ aaudio_handle_cmd_timestamp(a, msg); ++ break; ++ default: ++ dev_info(a->dev, "Unhandled device command %i", base.msg); ++ break; ++ } ++} ++ ++static struct pci_device_id aaudio_ids[ ] = { ++ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x1803) }, ++ { 0, }, ++}; ++ ++struct dev_pm_ops aaudio_pci_driver_pm = { ++ .suspend = aaudio_suspend, ++ .resume = aaudio_resume ++}; ++struct pci_driver aaudio_pci_driver = { ++ .name = "aaudio", ++ .id_table = aaudio_ids, ++ .probe = aaudio_probe, ++ .remove = aaudio_remove, ++ .driver = { ++ .pm = &aaudio_pci_driver_pm ++ } ++}; ++ ++ ++int aaudio_module_init(void) 
++{ ++ int result; ++ if ((result = alloc_chrdev_region(&aaudio_chrdev, 0, 1, "aaudio"))) ++ goto fail_chrdev; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,4,0) ++ aaudio_class = class_create(THIS_MODULE, "aaudio"); ++#else ++ aaudio_class = class_create("aaudio"); ++#endif ++ if (IS_ERR(aaudio_class)) { ++ result = PTR_ERR(aaudio_class); ++ goto fail_class; ++ } ++ ++ result = pci_register_driver(&aaudio_pci_driver); ++ if (result) ++ goto fail_drv; ++ return 0; ++ ++fail_drv: ++ pci_unregister_driver(&aaudio_pci_driver); ++fail_class: ++ class_destroy(aaudio_class); ++fail_chrdev: ++ unregister_chrdev_region(aaudio_chrdev, 1); ++ if (!result) ++ result = -EINVAL; ++ return result; ++} ++ ++void aaudio_module_exit(void) ++{ ++ pci_unregister_driver(&aaudio_pci_driver); ++ class_destroy(aaudio_class); ++ unregister_chrdev_region(aaudio_chrdev, 1); ++} ++ ++struct aaudio_alsa_pcm_id_mapping aaudio_alsa_id_mappings[] = { ++ {"Speaker", 0}, ++ {"Digital Mic", 1}, ++ {"Codec Output", 2}, ++ {"Codec Input", 3}, ++ {"Bridge Loopback", 4}, ++ {} ++}; ++ ++module_param_named(index, aaudio_alsa_index, int, 0444); ++MODULE_PARM_DESC(index, "Index value for Apple Internal Audio soundcard."); ++module_param_named(id, aaudio_alsa_id, charp, 0444); ++MODULE_PARM_DESC(id, "ID string for Apple Internal Audio soundcard."); +diff --git a/drivers/staging/apple-bce/audio/audio.h b/drivers/staging/apple-bce/audio/audio.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/audio.h +@@ -0,0 +1,125 @@ ++#ifndef AAUDIO_H ++#define AAUDIO_H ++ ++#include ++#include ++#include "../apple_bce.h" ++#include "protocol_bce.h" ++#include "description.h" ++ ++#define AAUDIO_SIG 0x19870423 ++ ++#define AAUDIO_DEVICE_MAX_UID_LEN 128 ++#define AAUDIO_DEIVCE_MAX_INPUT_STREAMS 1 ++#define AAUDIO_DEIVCE_MAX_OUTPUT_STREAMS 1 ++#define AAUDIO_DEIVCE_MAX_BUFFER_COUNT 1 ++ ++#define AAUDIO_BUFFER_ID_NONE 0xffu ++ ++struct snd_card; ++struct snd_pcm; 
++struct snd_pcm_hardware; ++struct snd_jack; ++ ++struct __attribute__((packed)) __attribute__((aligned(4))) aaudio_buffer_struct_buffer { ++ size_t address; ++ size_t size; ++ size_t pad[4]; ++}; ++struct aaudio_buffer_struct_stream { ++ u8 num_buffers; ++ struct aaudio_buffer_struct_buffer buffers[100]; ++ char filler[32]; ++}; ++struct aaudio_buffer_struct_device { ++ char name[128]; ++ u8 num_input_streams; ++ u8 num_output_streams; ++ struct aaudio_buffer_struct_stream input_streams[5]; ++ struct aaudio_buffer_struct_stream output_streams[5]; ++ char filler[128]; ++}; ++struct aaudio_buffer_struct { ++ u32 version; ++ u32 signature; ++ u32 flags; ++ u8 num_devices; ++ struct aaudio_buffer_struct_device devices[20]; ++}; ++ ++struct aaudio_device; ++struct aaudio_dma_buf { ++ dma_addr_t dma_addr; ++ void *ptr; ++ size_t size; ++}; ++struct aaudio_stream { ++ aaudio_object_id_t id; ++ size_t buffer_cnt; ++ struct aaudio_dma_buf *buffers; ++ ++ struct aaudio_apple_description desc; ++ struct snd_pcm_hardware *alsa_hw_desc; ++ u32 latency; ++ ++ bool waiting_for_first_ts; ++ ++ ktime_t remote_timestamp; ++ snd_pcm_sframes_t frame_min; ++ int started; ++}; ++struct aaudio_subdevice { ++ struct aaudio_device *a; ++ struct list_head list; ++ aaudio_device_id_t dev_id; ++ u32 in_latency, out_latency; ++ u8 buf_id; ++ int alsa_id; ++ char uid[AAUDIO_DEVICE_MAX_UID_LEN + 1]; ++ size_t in_stream_cnt; ++ struct aaudio_stream in_streams[AAUDIO_DEIVCE_MAX_INPUT_STREAMS]; ++ size_t out_stream_cnt; ++ struct aaudio_stream out_streams[AAUDIO_DEIVCE_MAX_OUTPUT_STREAMS]; ++ bool is_pcm; ++ struct snd_pcm *pcm; ++ struct snd_jack *jack; ++}; ++struct aaudio_alsa_pcm_id_mapping { ++ const char *name; ++ int alsa_id; ++}; ++ ++struct aaudio_device { ++ struct pci_dev *pci; ++ dev_t devt; ++ struct device *dev; ++ void __iomem *reg_mem_bs; ++ dma_addr_t reg_mem_bs_dma; ++ void __iomem *reg_mem_cfg; ++ ++ u32 __iomem *reg_mem_gpr; ++ ++ struct aaudio_buffer_struct *bs; ++ ++ struct 
apple_bce_device *bce; ++ struct aaudio_bce bcem; ++ ++ struct snd_card *card; ++ ++ struct list_head subdevice_list; ++ int next_alsa_id; ++ ++ struct completion remote_alive; ++}; ++ ++void aaudio_handle_notification(struct aaudio_device *a, struct aaudio_msg *msg); ++void aaudio_handle_prop_change_work(struct work_struct *ws); ++void aaudio_handle_cmd_timestamp(struct aaudio_device *a, struct aaudio_msg *msg); ++void aaudio_handle_command(struct aaudio_device *a, struct aaudio_msg *msg); ++ ++int aaudio_module_init(void); ++void aaudio_module_exit(void); ++ ++extern struct aaudio_alsa_pcm_id_mapping aaudio_alsa_id_mappings[]; ++ ++#endif //AAUDIO_H +diff --git a/drivers/staging/apple-bce/audio/description.h b/drivers/staging/apple-bce/audio/description.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/description.h +@@ -0,0 +1,42 @@ ++#ifndef AAUDIO_DESCRIPTION_H ++#define AAUDIO_DESCRIPTION_H ++ ++#include ++ ++struct aaudio_apple_description { ++ u64 sample_rate_double; ++ u32 format_id; ++ u32 format_flags; ++ u32 bytes_per_packet; ++ u32 frames_per_packet; ++ u32 bytes_per_frame; ++ u32 channels_per_frame; ++ u32 bits_per_channel; ++ u32 reserved; ++}; ++ ++enum { ++ AAUDIO_FORMAT_LPCM = 0x6c70636d // 'lpcm' ++}; ++ ++enum { ++ AAUDIO_FORMAT_FLAG_FLOAT = 1, ++ AAUDIO_FORMAT_FLAG_BIG_ENDIAN = 2, ++ AAUDIO_FORMAT_FLAG_SIGNED = 4, ++ AAUDIO_FORMAT_FLAG_PACKED = 8, ++ AAUDIO_FORMAT_FLAG_ALIGNED_HIGH = 16, ++ AAUDIO_FORMAT_FLAG_NON_INTERLEAVED = 32, ++ AAUDIO_FORMAT_FLAG_NON_MIXABLE = 64 ++}; ++ ++static inline u64 aaudio_double_to_u64(u64 d) ++{ ++ u8 sign = (u8) ((d >> 63) & 1); ++ s32 exp = (s32) ((d >> 52) & 0x7ff) - 1023; ++ u64 fr = d & ((1LL << 52) - 1); ++ if (sign || exp < 0) ++ return 0; ++ return (u64) ((1LL << exp) + (fr >> (52 - exp))); ++} ++ ++#endif //AAUDIO_DESCRIPTION_H +diff --git a/drivers/staging/apple-bce/audio/pcm.c b/drivers/staging/apple-bce/audio/pcm.c +new file mode 100644 
+index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/pcm.c +@@ -0,0 +1,308 @@ ++#include "pcm.h" ++#include "audio.h" ++ ++static u64 aaudio_get_alsa_fmtbit(struct aaudio_apple_description *desc) ++{ ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_FLOAT) { ++ if (desc->bits_per_channel == 32) { ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_BIG_ENDIAN) ++ return SNDRV_PCM_FMTBIT_FLOAT_BE; ++ else ++ return SNDRV_PCM_FMTBIT_FLOAT_LE; ++ } else if (desc->bits_per_channel == 64) { ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_BIG_ENDIAN) ++ return SNDRV_PCM_FMTBIT_FLOAT64_BE; ++ else ++ return SNDRV_PCM_FMTBIT_FLOAT64_LE; ++ } else { ++ pr_err("aaudio: unsupported bits per channel for float format: %u\n", desc->bits_per_channel); ++ return 0; ++ } ++ } ++#define DEFINE_BPC_OPTION(val, b) \ ++ case val: \ ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_BIG_ENDIAN) { \ ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_SIGNED) \ ++ return SNDRV_PCM_FMTBIT_S ## b ## BE; \ ++ else \ ++ return SNDRV_PCM_FMTBIT_U ## b ## BE; \ ++ } else { \ ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_SIGNED) \ ++ return SNDRV_PCM_FMTBIT_S ## b ## LE; \ ++ else \ ++ return SNDRV_PCM_FMTBIT_U ## b ## LE; \ ++ } ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_PACKED) { ++ switch (desc->bits_per_channel) { ++ case 8: ++ case 16: ++ case 32: ++ break; ++ DEFINE_BPC_OPTION(24, 24_3) ++ default: ++ pr_err("aaudio: unsupported bits per channel for packed format: %u\n", desc->bits_per_channel); ++ return 0; ++ } ++ } ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_ALIGNED_HIGH) { ++ switch (desc->bits_per_channel) { ++ DEFINE_BPC_OPTION(24, 32_) ++ default: ++ pr_err("aaudio: unsupported bits per channel for high-aligned format: %u\n", desc->bits_per_channel); ++ return 0; ++ } ++ } ++ switch (desc->bits_per_channel) { ++ case 8: ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_SIGNED) ++ return SNDRV_PCM_FMTBIT_S8; ++ else ++ return SNDRV_PCM_FMTBIT_U8; ++ 
DEFINE_BPC_OPTION(16, 16_) ++ DEFINE_BPC_OPTION(24, 24_) ++ DEFINE_BPC_OPTION(32, 32_) ++ default: ++ pr_err("aaudio: unsupported bits per channel: %u\n", desc->bits_per_channel); ++ return 0; ++ } ++} ++int aaudio_create_hw_info(struct aaudio_apple_description *desc, struct snd_pcm_hardware *alsa_hw, ++ size_t buf_size) ++{ ++ uint rate; ++ alsa_hw->info = (SNDRV_PCM_INFO_MMAP | ++ SNDRV_PCM_INFO_BLOCK_TRANSFER | ++ SNDRV_PCM_INFO_MMAP_VALID | ++ SNDRV_PCM_INFO_DOUBLE); ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_NON_MIXABLE) ++ pr_warn("aaudio: unsupported hw flag: NON_MIXABLE\n"); ++ if (!(desc->format_flags & AAUDIO_FORMAT_FLAG_NON_INTERLEAVED)) ++ alsa_hw->info |= SNDRV_PCM_INFO_INTERLEAVED; ++ alsa_hw->formats = aaudio_get_alsa_fmtbit(desc); ++ if (!alsa_hw->formats) ++ return -EINVAL; ++ rate = (uint) aaudio_double_to_u64(desc->sample_rate_double); ++ alsa_hw->rates = snd_pcm_rate_to_rate_bit(rate); ++ alsa_hw->rate_min = rate; ++ alsa_hw->rate_max = rate; ++ alsa_hw->channels_min = desc->channels_per_frame; ++ alsa_hw->channels_max = desc->channels_per_frame; ++ alsa_hw->buffer_bytes_max = buf_size; ++ alsa_hw->period_bytes_min = desc->bytes_per_packet; ++ alsa_hw->period_bytes_max = desc->bytes_per_packet; ++ alsa_hw->periods_min = (uint) (buf_size / desc->bytes_per_packet); ++ alsa_hw->periods_max = (uint) (buf_size / desc->bytes_per_packet); ++ pr_debug("aaudio_create_hw_info: format = %llu, rate = %u/%u. 
channels = %u, periods = %u, period size = %lu\n", ++ alsa_hw->formats, alsa_hw->rate_min, alsa_hw->rates, alsa_hw->channels_min, alsa_hw->periods_min, ++ alsa_hw->period_bytes_min); ++ return 0; ++} ++ ++static struct aaudio_stream *aaudio_pcm_stream(struct snd_pcm_substream *substream) ++{ ++ struct aaudio_subdevice *sdev = snd_pcm_substream_chip(substream); ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ++ return &sdev->out_streams[substream->number]; ++ else ++ return &sdev->in_streams[substream->number]; ++} ++ ++static int aaudio_pcm_open(struct snd_pcm_substream *substream) ++{ ++ pr_debug("aaudio_pcm_open\n"); ++ substream->runtime->hw = *aaudio_pcm_stream(substream)->alsa_hw_desc; ++ ++ return 0; ++} ++ ++static int aaudio_pcm_close(struct snd_pcm_substream *substream) ++{ ++ pr_debug("aaudio_pcm_close\n"); ++ return 0; ++} ++ ++static int aaudio_pcm_prepare(struct snd_pcm_substream *substream) ++{ ++ return 0; ++} ++ ++static int aaudio_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) ++{ ++ struct aaudio_stream *astream = aaudio_pcm_stream(substream); ++ pr_debug("aaudio_pcm_hw_params\n"); ++ ++ if (!astream->buffer_cnt || !astream->buffers) ++ return -EINVAL; ++ ++ substream->runtime->dma_area = astream->buffers[0].ptr; ++ substream->runtime->dma_addr = astream->buffers[0].dma_addr; ++ substream->runtime->dma_bytes = astream->buffers[0].size; ++ return 0; ++} ++ ++static int aaudio_pcm_hw_free(struct snd_pcm_substream *substream) ++{ ++ pr_debug("aaudio_pcm_hw_free\n"); ++ return 0; ++} ++ ++static void aaudio_pcm_start(struct snd_pcm_substream *substream) ++{ ++ struct aaudio_subdevice *sdev = snd_pcm_substream_chip(substream); ++ struct aaudio_stream *stream = aaudio_pcm_stream(substream); ++ void *buf; ++ size_t s; ++ ktime_t time_start, time_end; ++ bool back_buffer; ++ time_start = ktime_get(); ++ ++ back_buffer = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); ++ ++ if (back_buffer) { ++ s = 
frames_to_bytes(substream->runtime, substream->runtime->control->appl_ptr); ++ buf = kmalloc(s, GFP_KERNEL); ++ memcpy_fromio(buf, substream->runtime->dma_area, s); ++ time_end = ktime_get(); ++ pr_debug("aaudio: Backed up the buffer in %lluns [%li]\n", ktime_to_ns(time_end - time_start), ++ substream->runtime->control->appl_ptr); ++ } ++ ++ stream->waiting_for_first_ts = true; ++ stream->frame_min = stream->latency; ++ ++ aaudio_cmd_start_io(sdev->a, sdev->dev_id); ++ if (back_buffer) ++ memcpy_toio(substream->runtime->dma_area, buf, s); ++ ++ time_end = ktime_get(); ++ pr_debug("aaudio: Started the audio device in %lluns\n", ktime_to_ns(time_end - time_start)); ++} ++ ++static int aaudio_pcm_trigger(struct snd_pcm_substream *substream, int cmd) ++{ ++ struct aaudio_subdevice *sdev = snd_pcm_substream_chip(substream); ++ struct aaudio_stream *stream = aaudio_pcm_stream(substream); ++ pr_debug("aaudio_pcm_trigger %x\n", cmd); ++ ++ /* We only supports triggers on the #0 buffer */ ++ if (substream->number != 0) ++ return 0; ++ switch (cmd) { ++ case SNDRV_PCM_TRIGGER_START: ++ aaudio_pcm_start(substream); ++ stream->started = 1; ++ break; ++ case SNDRV_PCM_TRIGGER_STOP: ++ aaudio_cmd_stop_io(sdev->a, sdev->dev_id); ++ stream->started = 0; ++ break; ++ default: ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static snd_pcm_uframes_t aaudio_pcm_pointer(struct snd_pcm_substream *substream) ++{ ++ struct aaudio_stream *stream = aaudio_pcm_stream(substream); ++ ktime_t time_from_start; ++ snd_pcm_sframes_t frames; ++ snd_pcm_sframes_t buffer_time_length; ++ ++ if (!stream->started || stream->waiting_for_first_ts) { ++ pr_warn("aaudio_pcm_pointer while not started\n"); ++ return 0; ++ } ++ ++ /* Approximate the pointer based on the last received timestamp */ ++ time_from_start = ktime_get_boottime() - stream->remote_timestamp; ++ buffer_time_length = NSEC_PER_SEC * substream->runtime->buffer_size / substream->runtime->rate; ++ frames = (ktime_to_ns(time_from_start) % 
buffer_time_length) * substream->runtime->buffer_size / buffer_time_length; ++ if (ktime_to_ns(time_from_start) < buffer_time_length) { ++ if (frames < stream->frame_min) ++ frames = stream->frame_min; ++ else ++ stream->frame_min = 0; ++ } else { ++ if (ktime_to_ns(time_from_start) < 2 * buffer_time_length) ++ stream->frame_min = frames; ++ else ++ stream->frame_min = 0; /* Heavy desync */ ++ } ++ frames -= stream->latency; ++ if (frames < 0) ++ frames += ((-frames - 1) / substream->runtime->buffer_size + 1) * substream->runtime->buffer_size; ++ return (snd_pcm_uframes_t) frames; ++} ++ ++static struct snd_pcm_ops aaudio_pcm_ops = { ++ .open = aaudio_pcm_open, ++ .close = aaudio_pcm_close, ++ .ioctl = snd_pcm_lib_ioctl, ++ .hw_params = aaudio_pcm_hw_params, ++ .hw_free = aaudio_pcm_hw_free, ++ .prepare = aaudio_pcm_prepare, ++ .trigger = aaudio_pcm_trigger, ++ .pointer = aaudio_pcm_pointer, ++ .mmap = snd_pcm_lib_mmap_iomem ++}; ++ ++int aaudio_create_pcm(struct aaudio_subdevice *sdev) ++{ ++ struct snd_pcm *pcm; ++ struct aaudio_alsa_pcm_id_mapping *id_mapping; ++ int err; ++ ++ if (!sdev->is_pcm || (sdev->in_stream_cnt == 0 && sdev->out_stream_cnt == 0)) { ++ return -EINVAL; ++ } ++ ++ for (id_mapping = aaudio_alsa_id_mappings; id_mapping->name; id_mapping++) { ++ if (!strcmp(sdev->uid, id_mapping->name)) { ++ sdev->alsa_id = id_mapping->alsa_id; ++ break; ++ } ++ } ++ if (!id_mapping->name) ++ sdev->alsa_id = sdev->a->next_alsa_id++; ++ err = snd_pcm_new(sdev->a->card, sdev->uid, sdev->alsa_id, ++ (int) sdev->out_stream_cnt, (int) sdev->in_stream_cnt, &pcm); ++ if (err < 0) ++ return err; ++ pcm->private_data = sdev; ++ pcm->nonatomic = 1; ++ sdev->pcm = pcm; ++ strcpy(pcm->name, sdev->uid); ++ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &aaudio_pcm_ops); ++ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &aaudio_pcm_ops); ++ return 0; ++} ++ ++static void aaudio_handle_stream_timestamp(struct snd_pcm_substream *substream, ktime_t timestamp) ++{ ++ unsigned 
long flags; ++ struct aaudio_stream *stream; ++ ++ stream = aaudio_pcm_stream(substream); ++ snd_pcm_stream_lock_irqsave(substream, flags); ++ stream->remote_timestamp = timestamp; ++ if (stream->waiting_for_first_ts) { ++ stream->waiting_for_first_ts = false; ++ snd_pcm_stream_unlock_irqrestore(substream, flags); ++ return; ++ } ++ snd_pcm_stream_unlock_irqrestore(substream, flags); ++ snd_pcm_period_elapsed(substream); ++} ++ ++void aaudio_handle_timestamp(struct aaudio_subdevice *sdev, ktime_t os_timestamp, u64 dev_timestamp) ++{ ++ struct snd_pcm_substream *substream; ++ ++ substream = sdev->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; ++ if (substream) ++ aaudio_handle_stream_timestamp(substream, dev_timestamp); ++ substream = sdev->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; ++ if (substream) ++ aaudio_handle_stream_timestamp(substream, os_timestamp); ++} +diff --git a/drivers/staging/apple-bce/audio/pcm.h b/drivers/staging/apple-bce/audio/pcm.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/pcm.h +@@ -0,0 +1,16 @@ ++#ifndef AAUDIO_PCM_H ++#define AAUDIO_PCM_H ++ ++#include ++#include ++ ++struct aaudio_subdevice; ++struct aaudio_apple_description; ++struct snd_pcm_hardware; ++ ++int aaudio_create_hw_info(struct aaudio_apple_description *desc, struct snd_pcm_hardware *alsa_hw, size_t buf_size); ++int aaudio_create_pcm(struct aaudio_subdevice *sdev); ++ ++void aaudio_handle_timestamp(struct aaudio_subdevice *sdev, ktime_t os_timestamp, u64 dev_timestamp); ++ ++#endif //AAUDIO_PCM_H +diff --git a/drivers/staging/apple-bce/audio/protocol.c b/drivers/staging/apple-bce/audio/protocol.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/protocol.c +@@ -0,0 +1,347 @@ ++#include "protocol.h" ++#include "protocol_bce.h" ++#include "audio.h" ++ ++int aaudio_msg_read_base(struct aaudio_msg *msg, struct aaudio_msg_base *base) ++{ ++ if 
(msg->size < sizeof(struct aaudio_msg_header) + sizeof(struct aaudio_msg_base) * 2) ++ return -EINVAL; ++ *base = *((struct aaudio_msg_base *) ((struct aaudio_msg_header *) msg->data + 1)); ++ return 0; ++} ++ ++#define READ_START(type) \ ++ size_t offset = sizeof(struct aaudio_msg_header) + sizeof(struct aaudio_msg_base); (void)offset; \ ++ if (((struct aaudio_msg_base *) ((struct aaudio_msg_header *) msg->data + 1))->msg != type) \ ++ return -EINVAL; ++#define READ_DEVID_VAR(devid) *devid = ((struct aaudio_msg_header *) msg->data)->device_id ++#define READ_VAL(type) ({ offset += sizeof(type); *((type *) ((u8 *) msg->data + offset - sizeof(type))); }) ++#define READ_VAR(type, var) *var = READ_VAL(type) ++ ++int aaudio_msg_read_start_io_response(struct aaudio_msg *msg) ++{ ++ READ_START(AAUDIO_MSG_START_IO_RESPONSE); ++ return 0; ++} ++ ++int aaudio_msg_read_stop_io_response(struct aaudio_msg *msg) ++{ ++ READ_START(AAUDIO_MSG_STOP_IO_RESPONSE); ++ return 0; ++} ++ ++int aaudio_msg_read_update_timestamp(struct aaudio_msg *msg, aaudio_device_id_t *devid, ++ u64 *timestamp, u64 *update_seed) ++{ ++ READ_START(AAUDIO_MSG_UPDATE_TIMESTAMP); ++ READ_DEVID_VAR(devid); ++ READ_VAR(u64, timestamp); ++ READ_VAR(u64, update_seed); ++ return 0; ++} ++ ++int aaudio_msg_read_get_property_response(struct aaudio_msg *msg, aaudio_object_id_t *obj, ++ struct aaudio_prop_addr *prop, void **data, u64 *data_size) ++{ ++ READ_START(AAUDIO_MSG_GET_PROPERTY_RESPONSE); ++ READ_VAR(aaudio_object_id_t, obj); ++ READ_VAR(u32, &prop->element); ++ READ_VAR(u32, &prop->scope); ++ READ_VAR(u32, &prop->selector); ++ READ_VAR(u64, data_size); ++ *data = ((u8 *) msg->data + offset); ++ /* offset += data_size; */ ++ return 0; ++} ++ ++int aaudio_msg_read_set_property_response(struct aaudio_msg *msg, aaudio_object_id_t *obj) ++{ ++ READ_START(AAUDIO_MSG_SET_PROPERTY_RESPONSE); ++ READ_VAR(aaudio_object_id_t, obj); ++ return 0; ++} ++ ++int aaudio_msg_read_property_listener_response(struct aaudio_msg 
*msg, aaudio_object_id_t *obj, ++ struct aaudio_prop_addr *prop) ++{ ++ READ_START(AAUDIO_MSG_PROPERTY_LISTENER_RESPONSE); ++ READ_VAR(aaudio_object_id_t, obj); ++ READ_VAR(u32, &prop->element); ++ READ_VAR(u32, &prop->scope); ++ READ_VAR(u32, &prop->selector); ++ return 0; ++} ++ ++int aaudio_msg_read_property_changed(struct aaudio_msg *msg, aaudio_device_id_t *devid, aaudio_object_id_t *obj, ++ struct aaudio_prop_addr *prop) ++{ ++ READ_START(AAUDIO_MSG_PROPERTY_CHANGED); ++ READ_DEVID_VAR(devid); ++ READ_VAR(aaudio_object_id_t, obj); ++ READ_VAR(u32, &prop->element); ++ READ_VAR(u32, &prop->scope); ++ READ_VAR(u32, &prop->selector); ++ return 0; ++} ++ ++int aaudio_msg_read_set_input_stream_address_ranges_response(struct aaudio_msg *msg) ++{ ++ READ_START(AAUDIO_MSG_SET_INPUT_STREAM_ADDRESS_RANGES_RESPONSE); ++ return 0; ++} ++ ++int aaudio_msg_read_get_input_stream_list_response(struct aaudio_msg *msg, aaudio_object_id_t **str_l, u64 *str_cnt) ++{ ++ READ_START(AAUDIO_MSG_GET_INPUT_STREAM_LIST_RESPONSE); ++ READ_VAR(u64, str_cnt); ++ *str_l = (aaudio_device_id_t *) ((u8 *) msg->data + offset); ++ /* offset += str_cnt * sizeof(aaudio_object_id_t); */ ++ return 0; ++} ++ ++int aaudio_msg_read_get_output_stream_list_response(struct aaudio_msg *msg, aaudio_object_id_t **str_l, u64 *str_cnt) ++{ ++ READ_START(AAUDIO_MSG_GET_OUTPUT_STREAM_LIST_RESPONSE); ++ READ_VAR(u64, str_cnt); ++ *str_l = (aaudio_device_id_t *) ((u8 *) msg->data + offset); ++ /* offset += str_cnt * sizeof(aaudio_object_id_t); */ ++ return 0; ++} ++ ++int aaudio_msg_read_set_remote_access_response(struct aaudio_msg *msg) ++{ ++ READ_START(AAUDIO_MSG_SET_REMOTE_ACCESS_RESPONSE); ++ return 0; ++} ++ ++int aaudio_msg_read_get_device_list_response(struct aaudio_msg *msg, aaudio_device_id_t **dev_l, u64 *dev_cnt) ++{ ++ READ_START(AAUDIO_MSG_GET_DEVICE_LIST_RESPONSE); ++ READ_VAR(u64, dev_cnt); ++ *dev_l = (aaudio_device_id_t *) ((u8 *) msg->data + offset); ++ /* offset += dev_cnt * 
sizeof(aaudio_device_id_t); */ ++ return 0; ++} ++ ++#define WRITE_START_OF_TYPE(typev, devid) \ ++ size_t offset = sizeof(struct aaudio_msg_header); (void) offset; \ ++ ((struct aaudio_msg_header *) msg->data)->type = (typev); \ ++ ((struct aaudio_msg_header *) msg->data)->device_id = (devid); ++#define WRITE_START_COMMAND(devid) WRITE_START_OF_TYPE(AAUDIO_MSG_TYPE_COMMAND, devid) ++#define WRITE_START_RESPONSE() WRITE_START_OF_TYPE(AAUDIO_MSG_TYPE_RESPONSE, 0) ++#define WRITE_START_NOTIFICATION() WRITE_START_OF_TYPE(AAUDIO_MSG_TYPE_NOTIFICATION, 0) ++#define WRITE_VAL(type, value) { *((type *) ((u8 *) msg->data + offset)) = value; offset += sizeof(value); } ++#define WRITE_BIN(value, size) { memcpy((u8 *) msg->data + offset, value, size); offset += size; } ++#define WRITE_BASE(type) WRITE_VAL(u32, type) WRITE_VAL(u32, 0) ++#define WRITE_END() { msg->size = offset; } ++ ++void aaudio_msg_write_start_io(struct aaudio_msg *msg, aaudio_device_id_t dev) ++{ ++ WRITE_START_COMMAND(dev); ++ WRITE_BASE(AAUDIO_MSG_START_IO); ++ WRITE_END(); ++} ++ ++void aaudio_msg_write_stop_io(struct aaudio_msg *msg, aaudio_device_id_t dev) ++{ ++ WRITE_START_COMMAND(dev); ++ WRITE_BASE(AAUDIO_MSG_STOP_IO); ++ WRITE_END(); ++} ++ ++void aaudio_msg_write_get_property(struct aaudio_msg *msg, aaudio_device_id_t dev, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size) ++{ ++ WRITE_START_COMMAND(dev); ++ WRITE_BASE(AAUDIO_MSG_GET_PROPERTY); ++ WRITE_VAL(aaudio_object_id_t, obj); ++ WRITE_VAL(u32, prop.element); ++ WRITE_VAL(u32, prop.scope); ++ WRITE_VAL(u32, prop.selector); ++ WRITE_VAL(u64, qualifier_size); ++ WRITE_BIN(qualifier, qualifier_size); ++ WRITE_END(); ++} ++ ++void aaudio_msg_write_set_property(struct aaudio_msg *msg, aaudio_device_id_t dev, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *data, u64 data_size, void *qualifier, u64 qualifier_size) ++{ ++ WRITE_START_COMMAND(dev); ++ WRITE_BASE(AAUDIO_MSG_SET_PROPERTY); ++ 
WRITE_VAL(aaudio_object_id_t, obj); ++ WRITE_VAL(u32, prop.element); ++ WRITE_VAL(u32, prop.scope); ++ WRITE_VAL(u32, prop.selector); ++ WRITE_VAL(u64, data_size); ++ WRITE_BIN(data, data_size); ++ WRITE_VAL(u64, qualifier_size); ++ WRITE_BIN(qualifier, qualifier_size); ++ WRITE_END(); ++} ++ ++void aaudio_msg_write_property_listener(struct aaudio_msg *msg, aaudio_device_id_t dev, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop) ++{ ++ WRITE_START_COMMAND(dev); ++ WRITE_BASE(AAUDIO_MSG_PROPERTY_LISTENER); ++ WRITE_VAL(aaudio_object_id_t, obj); ++ WRITE_VAL(u32, prop.element); ++ WRITE_VAL(u32, prop.scope); ++ WRITE_VAL(u32, prop.selector); ++ WRITE_END(); ++} ++ ++void aaudio_msg_write_set_input_stream_address_ranges(struct aaudio_msg *msg, aaudio_device_id_t devid) ++{ ++ WRITE_START_COMMAND(devid); ++ WRITE_BASE(AAUDIO_MSG_SET_INPUT_STREAM_ADDRESS_RANGES); ++ WRITE_END(); ++} ++ ++void aaudio_msg_write_get_input_stream_list(struct aaudio_msg *msg, aaudio_device_id_t devid) ++{ ++ WRITE_START_COMMAND(devid); ++ WRITE_BASE(AAUDIO_MSG_GET_INPUT_STREAM_LIST); ++ WRITE_END(); ++} ++ ++void aaudio_msg_write_get_output_stream_list(struct aaudio_msg *msg, aaudio_device_id_t devid) ++{ ++ WRITE_START_COMMAND(devid); ++ WRITE_BASE(AAUDIO_MSG_GET_OUTPUT_STREAM_LIST); ++ WRITE_END(); ++} ++ ++void aaudio_msg_write_set_remote_access(struct aaudio_msg *msg, u64 mode) ++{ ++ WRITE_START_COMMAND(0); ++ WRITE_BASE(AAUDIO_MSG_SET_REMOTE_ACCESS); ++ WRITE_VAL(u64, mode); ++ WRITE_END(); ++} ++ ++void aaudio_msg_write_alive_notification(struct aaudio_msg *msg, u32 proto_ver, u32 msg_ver) ++{ ++ WRITE_START_NOTIFICATION(); ++ WRITE_BASE(AAUDIO_MSG_NOTIFICATION_ALIVE); ++ WRITE_VAL(u32, proto_ver); ++ WRITE_VAL(u32, msg_ver); ++ WRITE_END(); ++} ++ ++void aaudio_msg_write_update_timestamp_response(struct aaudio_msg *msg) ++{ ++ WRITE_START_RESPONSE(); ++ WRITE_BASE(AAUDIO_MSG_UPDATE_TIMESTAMP_RESPONSE); ++ WRITE_END(); ++} ++ ++void aaudio_msg_write_get_device_list(struct 
aaudio_msg *msg) ++{ ++ WRITE_START_COMMAND(0); ++ WRITE_BASE(AAUDIO_MSG_GET_DEVICE_LIST); ++ WRITE_END(); ++} ++ ++#define CMD_SHARED_VARS_NO_REPLY \ ++ int status = 0; \ ++ struct aaudio_send_ctx sctx; ++#define CMD_SHARED_VARS \ ++ CMD_SHARED_VARS_NO_REPLY \ ++ struct aaudio_msg reply = aaudio_reply_alloc(); \ ++ struct aaudio_msg *buf = &reply; ++#define CMD_SEND_REQUEST(fn, ...) \ ++ if ((status = aaudio_send_cmd_sync(a, &sctx, buf, 500, fn, ##__VA_ARGS__))) \ ++ return status; ++#define CMD_DEF_SHARED_AND_SEND(fn, ...) \ ++ CMD_SHARED_VARS \ ++ CMD_SEND_REQUEST(fn, ##__VA_ARGS__); ++#define CMD_DEF_SHARED_NO_REPLY_AND_SEND(fn, ...) \ ++ CMD_SHARED_VARS_NO_REPLY \ ++ CMD_SEND_REQUEST(fn, ##__VA_ARGS__); ++#define CMD_HNDL_REPLY_NO_FREE(fn, ...) \ ++ status = fn(buf, ##__VA_ARGS__); \ ++ return status; ++#define CMD_HNDL_REPLY_AND_FREE(fn, ...) \ ++ status = fn(buf, ##__VA_ARGS__); \ ++ aaudio_reply_free(&reply); \ ++ return status; ++ ++int aaudio_cmd_start_io(struct aaudio_device *a, aaudio_device_id_t devid) ++{ ++ CMD_DEF_SHARED_AND_SEND(aaudio_msg_write_start_io, devid); ++ CMD_HNDL_REPLY_AND_FREE(aaudio_msg_read_start_io_response); ++} ++int aaudio_cmd_stop_io(struct aaudio_device *a, aaudio_device_id_t devid) ++{ ++ CMD_DEF_SHARED_AND_SEND(aaudio_msg_write_stop_io, devid); ++ CMD_HNDL_REPLY_AND_FREE(aaudio_msg_read_stop_io_response); ++} ++int aaudio_cmd_get_property(struct aaudio_device *a, struct aaudio_msg *buf, ++ aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size, void **data, u64 *data_size) ++{ ++ CMD_DEF_SHARED_NO_REPLY_AND_SEND(aaudio_msg_write_get_property, devid, obj, prop, qualifier, qualifier_size); ++ CMD_HNDL_REPLY_NO_FREE(aaudio_msg_read_get_property_response, &obj, &prop, data, data_size); ++} ++int aaudio_cmd_get_primitive_property(struct aaudio_device *a, ++ aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 
qualifier_size, void *data, u64 data_size) ++{ ++ int status; ++ struct aaudio_msg reply = aaudio_reply_alloc(); ++ void *r_data; ++ u64 r_data_size; ++ if ((status = aaudio_cmd_get_property(a, &reply, devid, obj, prop, qualifier, qualifier_size, ++ &r_data, &r_data_size))) ++ goto finish; ++ if (r_data_size != data_size) { ++ status = -EINVAL; ++ goto finish; ++ } ++ memcpy(data, r_data, data_size); ++finish: ++ aaudio_reply_free(&reply); ++ return status; ++} ++int aaudio_cmd_set_property(struct aaudio_device *a, aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size, void *data, u64 data_size) ++{ ++ CMD_DEF_SHARED_AND_SEND(aaudio_msg_write_set_property, devid, obj, prop, data, data_size, ++ qualifier, qualifier_size); ++ CMD_HNDL_REPLY_AND_FREE(aaudio_msg_read_set_property_response, &obj); ++} ++int aaudio_cmd_property_listener(struct aaudio_device *a, aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop) ++{ ++ CMD_DEF_SHARED_AND_SEND(aaudio_msg_write_property_listener, devid, obj, prop); ++ CMD_HNDL_REPLY_AND_FREE(aaudio_msg_read_property_listener_response, &obj, &prop); ++} ++int aaudio_cmd_set_input_stream_address_ranges(struct aaudio_device *a, aaudio_device_id_t devid) ++{ ++ CMD_DEF_SHARED_AND_SEND(aaudio_msg_write_set_input_stream_address_ranges, devid); ++ CMD_HNDL_REPLY_AND_FREE(aaudio_msg_read_set_input_stream_address_ranges_response); ++} ++int aaudio_cmd_get_input_stream_list(struct aaudio_device *a, struct aaudio_msg *buf, aaudio_device_id_t devid, ++ aaudio_object_id_t **str_l, u64 *str_cnt) ++{ ++ CMD_DEF_SHARED_NO_REPLY_AND_SEND(aaudio_msg_write_get_input_stream_list, devid); ++ CMD_HNDL_REPLY_NO_FREE(aaudio_msg_read_get_input_stream_list_response, str_l, str_cnt); ++} ++int aaudio_cmd_get_output_stream_list(struct aaudio_device *a, struct aaudio_msg *buf, aaudio_device_id_t devid, ++ aaudio_object_id_t **str_l, u64 *str_cnt) ++{ ++ 
CMD_DEF_SHARED_NO_REPLY_AND_SEND(aaudio_msg_write_get_output_stream_list, devid); ++ CMD_HNDL_REPLY_NO_FREE(aaudio_msg_read_get_output_stream_list_response, str_l, str_cnt); ++} ++int aaudio_cmd_set_remote_access(struct aaudio_device *a, u64 mode) ++{ ++ CMD_DEF_SHARED_AND_SEND(aaudio_msg_write_set_remote_access, mode); ++ CMD_HNDL_REPLY_AND_FREE(aaudio_msg_read_set_remote_access_response); ++} ++int aaudio_cmd_get_device_list(struct aaudio_device *a, struct aaudio_msg *buf, ++ aaudio_device_id_t **dev_l, u64 *dev_cnt) ++{ ++ CMD_DEF_SHARED_NO_REPLY_AND_SEND(aaudio_msg_write_get_device_list); ++ CMD_HNDL_REPLY_NO_FREE(aaudio_msg_read_get_device_list_response, dev_l, dev_cnt); ++} +\ No newline at end of file +diff --git a/drivers/staging/apple-bce/audio/protocol.h b/drivers/staging/apple-bce/audio/protocol.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/protocol.h +@@ -0,0 +1,147 @@ ++#ifndef AAUDIO_PROTOCOL_H ++#define AAUDIO_PROTOCOL_H ++ ++#include ++ ++struct aaudio_device; ++ ++typedef u64 aaudio_device_id_t; ++typedef u64 aaudio_object_id_t; ++ ++struct aaudio_msg { ++ void *data; ++ size_t size; ++}; ++ ++struct __attribute__((packed)) aaudio_msg_header { ++ char tag[4]; ++ u8 type; ++ aaudio_device_id_t device_id; // Idk, use zero for commands? 
++}; ++struct __attribute__((packed)) aaudio_msg_base { ++ u32 msg; ++ u32 status; ++}; ++ ++struct aaudio_prop_addr { ++ u32 scope; ++ u32 selector; ++ u32 element; ++}; ++#define AAUDIO_PROP(scope, sel, el) (struct aaudio_prop_addr) { scope, sel, el } ++ ++enum { ++ AAUDIO_MSG_TYPE_COMMAND = 1, ++ AAUDIO_MSG_TYPE_RESPONSE = 2, ++ AAUDIO_MSG_TYPE_NOTIFICATION = 3 ++}; ++ ++enum { ++ AAUDIO_MSG_START_IO = 0, ++ AAUDIO_MSG_START_IO_RESPONSE = 1, ++ AAUDIO_MSG_STOP_IO = 2, ++ AAUDIO_MSG_STOP_IO_RESPONSE = 3, ++ AAUDIO_MSG_UPDATE_TIMESTAMP = 4, ++ AAUDIO_MSG_GET_PROPERTY = 7, ++ AAUDIO_MSG_GET_PROPERTY_RESPONSE = 8, ++ AAUDIO_MSG_SET_PROPERTY = 9, ++ AAUDIO_MSG_SET_PROPERTY_RESPONSE = 10, ++ AAUDIO_MSG_PROPERTY_LISTENER = 11, ++ AAUDIO_MSG_PROPERTY_LISTENER_RESPONSE = 12, ++ AAUDIO_MSG_PROPERTY_CHANGED = 13, ++ AAUDIO_MSG_SET_INPUT_STREAM_ADDRESS_RANGES = 18, ++ AAUDIO_MSG_SET_INPUT_STREAM_ADDRESS_RANGES_RESPONSE = 19, ++ AAUDIO_MSG_GET_INPUT_STREAM_LIST = 24, ++ AAUDIO_MSG_GET_INPUT_STREAM_LIST_RESPONSE = 25, ++ AAUDIO_MSG_GET_OUTPUT_STREAM_LIST = 26, ++ AAUDIO_MSG_GET_OUTPUT_STREAM_LIST_RESPONSE = 27, ++ AAUDIO_MSG_SET_REMOTE_ACCESS = 32, ++ AAUDIO_MSG_SET_REMOTE_ACCESS_RESPONSE = 33, ++ AAUDIO_MSG_UPDATE_TIMESTAMP_RESPONSE = 34, ++ ++ AAUDIO_MSG_NOTIFICATION_ALIVE = 100, ++ AAUDIO_MSG_GET_DEVICE_LIST = 101, ++ AAUDIO_MSG_GET_DEVICE_LIST_RESPONSE = 102, ++ AAUDIO_MSG_NOTIFICATION_BOOT = 104 ++}; ++ ++enum { ++ AAUDIO_REMOTE_ACCESS_OFF = 0, ++ AAUDIO_REMOTE_ACCESS_ON = 2 ++}; ++ ++enum { ++ AAUDIO_PROP_SCOPE_GLOBAL = 0x676c6f62, // 'glob' ++ AAUDIO_PROP_SCOPE_INPUT = 0x696e7074, // 'inpt' ++ AAUDIO_PROP_SCOPE_OUTPUT = 0x6f757470 // 'outp' ++}; ++ ++enum { ++ AAUDIO_PROP_UID = 0x75696420, // 'uid ' ++ AAUDIO_PROP_BOOL_VALUE = 0x6263766c, // 'bcvl' ++ AAUDIO_PROP_JACK_PLUGGED = 0x6a61636b, // 'jack' ++ AAUDIO_PROP_SEL_VOLUME = 0x64656176, // 'deav' ++ AAUDIO_PROP_LATENCY = 0x6c746e63, // 'ltnc' ++ AAUDIO_PROP_PHYS_FORMAT = 0x70667420 // 'pft ' ++}; ++ ++int 
aaudio_msg_read_base(struct aaudio_msg *msg, struct aaudio_msg_base *base); ++ ++int aaudio_msg_read_start_io_response(struct aaudio_msg *msg); ++int aaudio_msg_read_stop_io_response(struct aaudio_msg *msg); ++int aaudio_msg_read_update_timestamp(struct aaudio_msg *msg, aaudio_device_id_t *devid, ++ u64 *timestamp, u64 *update_seed); ++int aaudio_msg_read_get_property_response(struct aaudio_msg *msg, aaudio_object_id_t *obj, ++ struct aaudio_prop_addr *prop, void **data, u64 *data_size); ++int aaudio_msg_read_set_property_response(struct aaudio_msg *msg, aaudio_object_id_t *obj); ++int aaudio_msg_read_property_listener_response(struct aaudio_msg *msg,aaudio_object_id_t *obj, ++ struct aaudio_prop_addr *prop); ++int aaudio_msg_read_property_changed(struct aaudio_msg *msg, aaudio_device_id_t *devid, aaudio_object_id_t *obj, ++ struct aaudio_prop_addr *prop); ++int aaudio_msg_read_set_input_stream_address_ranges_response(struct aaudio_msg *msg); ++int aaudio_msg_read_get_input_stream_list_response(struct aaudio_msg *msg, aaudio_object_id_t **str_l, u64 *str_cnt); ++int aaudio_msg_read_get_output_stream_list_response(struct aaudio_msg *msg, aaudio_object_id_t **str_l, u64 *str_cnt); ++int aaudio_msg_read_set_remote_access_response(struct aaudio_msg *msg); ++int aaudio_msg_read_get_device_list_response(struct aaudio_msg *msg, aaudio_device_id_t **dev_l, u64 *dev_cnt); ++ ++void aaudio_msg_write_start_io(struct aaudio_msg *msg, aaudio_device_id_t dev); ++void aaudio_msg_write_stop_io(struct aaudio_msg *msg, aaudio_device_id_t dev); ++void aaudio_msg_write_get_property(struct aaudio_msg *msg, aaudio_device_id_t dev, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size); ++void aaudio_msg_write_set_property(struct aaudio_msg *msg, aaudio_device_id_t dev, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *data, u64 data_size, void *qualifier, u64 qualifier_size); ++void aaudio_msg_write_property_listener(struct aaudio_msg 
*msg, aaudio_device_id_t dev, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop); ++void aaudio_msg_write_set_input_stream_address_ranges(struct aaudio_msg *msg, aaudio_device_id_t devid); ++void aaudio_msg_write_get_input_stream_list(struct aaudio_msg *msg, aaudio_device_id_t devid); ++void aaudio_msg_write_get_output_stream_list(struct aaudio_msg *msg, aaudio_device_id_t devid); ++void aaudio_msg_write_set_remote_access(struct aaudio_msg *msg, u64 mode); ++void aaudio_msg_write_alive_notification(struct aaudio_msg *msg, u32 proto_ver, u32 msg_ver); ++void aaudio_msg_write_update_timestamp_response(struct aaudio_msg *msg); ++void aaudio_msg_write_get_device_list(struct aaudio_msg *msg); ++ ++ ++int aaudio_cmd_start_io(struct aaudio_device *a, aaudio_device_id_t devid); ++int aaudio_cmd_stop_io(struct aaudio_device *a, aaudio_device_id_t devid); ++int aaudio_cmd_get_property(struct aaudio_device *a, struct aaudio_msg *buf, ++ aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size, void **data, u64 *data_size); ++int aaudio_cmd_get_primitive_property(struct aaudio_device *a, ++ aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size, void *data, u64 data_size); ++int aaudio_cmd_set_property(struct aaudio_device *a, aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size, void *data, u64 data_size); ++int aaudio_cmd_property_listener(struct aaudio_device *a, aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop); ++int aaudio_cmd_set_input_stream_address_ranges(struct aaudio_device *a, aaudio_device_id_t devid); ++int aaudio_cmd_get_input_stream_list(struct aaudio_device *a, struct aaudio_msg *buf, aaudio_device_id_t devid, ++ aaudio_object_id_t **str_l, u64 *str_cnt); ++int aaudio_cmd_get_output_stream_list(struct aaudio_device *a, struct 
aaudio_msg *buf, aaudio_device_id_t devid, ++ aaudio_object_id_t **str_l, u64 *str_cnt); ++int aaudio_cmd_set_remote_access(struct aaudio_device *a, u64 mode); ++int aaudio_cmd_get_device_list(struct aaudio_device *a, struct aaudio_msg *buf, ++ aaudio_device_id_t **dev_l, u64 *dev_cnt); ++ ++ ++ ++#endif //AAUDIO_PROTOCOL_H +diff --git a/drivers/staging/apple-bce/audio/protocol_bce.c b/drivers/staging/apple-bce/audio/protocol_bce.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/protocol_bce.c +@@ -0,0 +1,226 @@ ++#include "protocol_bce.h" ++ ++#include "audio.h" ++ ++static void aaudio_bce_out_queue_completion(struct bce_queue_sq *sq); ++static void aaudio_bce_in_queue_completion(struct bce_queue_sq *sq); ++static int aaudio_bce_queue_init(struct aaudio_device *dev, struct aaudio_bce_queue *q, const char *name, int direction, ++ bce_sq_completion cfn); ++void aaudio_bce_in_queue_submit_pending(struct aaudio_bce_queue *q, size_t count); ++ ++int aaudio_bce_init(struct aaudio_device *dev) ++{ ++ int status; ++ struct aaudio_bce *bce = &dev->bcem; ++ bce->cq = bce_create_cq(dev->bce, 0x80); ++ spin_lock_init(&bce->spinlock); ++ if (!bce->cq) ++ return -EINVAL; ++ if ((status = aaudio_bce_queue_init(dev, &bce->qout, "com.apple.BridgeAudio.IntelToARM", DMA_TO_DEVICE, ++ aaudio_bce_out_queue_completion))) { ++ return status; ++ } ++ if ((status = aaudio_bce_queue_init(dev, &bce->qin, "com.apple.BridgeAudio.ARMToIntel", DMA_FROM_DEVICE, ++ aaudio_bce_in_queue_completion))) { ++ return status; ++ } ++ aaudio_bce_in_queue_submit_pending(&bce->qin, bce->qin.el_count); ++ return 0; ++} ++ ++int aaudio_bce_queue_init(struct aaudio_device *dev, struct aaudio_bce_queue *q, const char *name, int direction, ++ bce_sq_completion cfn) ++{ ++ q->cq = dev->bcem.cq; ++ q->el_size = AAUDIO_BCE_QUEUE_ELEMENT_SIZE; ++ q->el_count = AAUDIO_BCE_QUEUE_ELEMENT_COUNT; ++ /* NOTE: The Apple impl uses 0x80 as the queue size, however 
we use 21 (in fact 20) to simplify the impl */ ++ q->sq = bce_create_sq(dev->bce, q->cq, name, (u32) (q->el_count + 1), direction, cfn, dev); ++ if (!q->sq) ++ return -EINVAL; ++ ++ q->data = dma_alloc_coherent(&dev->bce->pci->dev, q->el_size * q->el_count, &q->dma_addr, GFP_KERNEL); ++ if (!q->data) { ++ bce_destroy_sq(dev->bce, q->sq); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static void aaudio_send_create_tag(struct aaudio_bce *b, int *tagn, char tag[4]) ++{ ++ char tag_zero[5]; ++ b->tag_num = (b->tag_num + 1) % AAUDIO_BCE_QUEUE_TAG_COUNT; ++ *tagn = b->tag_num; ++ snprintf(tag_zero, 5, "S%03d", b->tag_num); ++ *((u32 *) tag) = *((u32 *) tag_zero); ++} ++ ++int __aaudio_send_prepare(struct aaudio_bce *b, struct aaudio_send_ctx *ctx, char *tag) ++{ ++ int status; ++ size_t index; ++ void *dptr; ++ struct aaudio_msg_header *header; ++ if ((status = bce_reserve_submission(b->qout.sq, &ctx->timeout))) ++ return status; ++ spin_lock_irqsave(&b->spinlock, ctx->irq_flags); ++ index = b->qout.data_tail; ++ dptr = (u8 *) b->qout.data + index * b->qout.el_size; ++ ctx->msg.data = dptr; ++ header = dptr; ++ if (tag) ++ *((u32 *) header->tag) = *((u32 *) tag); ++ else ++ aaudio_send_create_tag(b, &ctx->tag_n, header->tag); ++ return 0; ++} ++ ++void __aaudio_send(struct aaudio_bce *b, struct aaudio_send_ctx *ctx) ++{ ++ struct bce_qe_submission *s = bce_next_submission(b->qout.sq); ++#ifdef DEBUG ++ pr_debug("aaudio: Sending command data\n"); ++ print_hex_dump(KERN_DEBUG, "aaudio:OUT ", DUMP_PREFIX_NONE, 32, 1, ctx->msg.data, ctx->msg.size, true); ++#endif ++ bce_set_submission_single(s, b->qout.dma_addr + (dma_addr_t) (ctx->msg.data - b->qout.data), ctx->msg.size); ++ bce_submit_to_device(b->qout.sq); ++ b->qout.data_tail = (b->qout.data_tail + 1) % b->qout.el_count; ++ spin_unlock_irqrestore(&b->spinlock, ctx->irq_flags); ++} ++ ++int __aaudio_send_cmd_sync(struct aaudio_bce *b, struct aaudio_send_ctx *ctx, struct aaudio_msg *reply) ++{ ++ struct 
aaudio_bce_queue_entry ent; ++ DECLARE_COMPLETION_ONSTACK(cmpl); ++ ent.msg = reply; ++ ent.cmpl = &cmpl; ++ b->pending_entries[ctx->tag_n] = &ent; ++ __aaudio_send(b, ctx); /* unlocks the spinlock */ ++ ctx->timeout = wait_for_completion_timeout(&cmpl, ctx->timeout); ++ if (ctx->timeout == 0) { ++ /* Remove the pending queue entry; this will be normally handled by the completion route but ++ * during a timeout it won't */ ++ spin_lock_irqsave(&b->spinlock, ctx->irq_flags); ++ if (b->pending_entries[ctx->tag_n] == &ent) ++ b->pending_entries[ctx->tag_n] = NULL; ++ spin_unlock_irqrestore(&b->spinlock, ctx->irq_flags); ++ return -ETIMEDOUT; ++ } ++ return 0; ++} ++ ++static void aaudio_handle_reply(struct aaudio_bce *b, struct aaudio_msg *reply) ++{ ++ const char *tag; ++ int tagn; ++ unsigned long irq_flags; ++ char tag_zero[5]; ++ struct aaudio_bce_queue_entry *entry; ++ ++ tag = ((struct aaudio_msg_header *) reply->data)->tag; ++ if (tag[0] != 'S') { ++ pr_err("aaudio_handle_reply: Unexpected tag: %.4s\n", tag); ++ return; ++ } ++ *((u32 *) tag_zero) = *((u32 *) tag); ++ tag_zero[4] = 0; ++ if (kstrtoint(&tag_zero[1], 10, &tagn)) { ++ pr_err("aaudio_handle_reply: Tag parse failed: %.4s\n", tag); ++ return; ++ } ++ ++ spin_lock_irqsave(&b->spinlock, irq_flags); ++ entry = b->pending_entries[tagn]; ++ if (entry) { ++ if (reply->size < entry->msg->size) ++ entry->msg->size = reply->size; ++ memcpy(entry->msg->data, reply->data, entry->msg->size); ++ complete(entry->cmpl); ++ ++ b->pending_entries[tagn] = NULL; ++ } else { ++ pr_err("aaudio_handle_reply: No queued item found for tag: %.4s\n", tag); ++ } ++ spin_unlock_irqrestore(&b->spinlock, irq_flags); ++} ++ ++static void aaudio_bce_out_queue_completion(struct bce_queue_sq *sq) ++{ ++ while (bce_next_completion(sq)) { ++ //pr_info("aaudio: Send confirmed\n"); ++ bce_notify_submission_complete(sq); ++ } ++} ++ ++static void aaudio_bce_in_queue_handle_msg(struct aaudio_device *a, struct aaudio_msg *msg); ++ ++static 
void aaudio_bce_in_queue_completion(struct bce_queue_sq *sq) ++{ ++ struct aaudio_msg msg; ++ struct aaudio_device *dev = sq->userdata; ++ struct aaudio_bce_queue *q = &dev->bcem.qin; ++ struct bce_sq_completion_data *c; ++ size_t cnt = 0; ++ ++ mb(); ++ while ((c = bce_next_completion(sq))) { ++ msg.data = (u8 *) q->data + q->data_head * q->el_size; ++ msg.size = c->data_size; ++#ifdef DEBUG ++ pr_debug("aaudio: Received command data %llx\n", c->data_size); ++ print_hex_dump(KERN_DEBUG, "aaudio:IN ", DUMP_PREFIX_NONE, 32, 1, msg.data, min(msg.size, 128UL), true); ++#endif ++ aaudio_bce_in_queue_handle_msg(dev, &msg); ++ ++ q->data_head = (q->data_head + 1) % q->el_count; ++ ++ bce_notify_submission_complete(sq); ++ ++cnt; ++ } ++ aaudio_bce_in_queue_submit_pending(q, cnt); ++} ++ ++static void aaudio_bce_in_queue_handle_msg(struct aaudio_device *a, struct aaudio_msg *msg) ++{ ++ struct aaudio_msg_header *header = (struct aaudio_msg_header *) msg->data; ++ if (msg->size < sizeof(struct aaudio_msg_header)) { ++ pr_err("aaudio: Msg size smaller than header (%lx)", msg->size); ++ return; ++ } ++ if (header->type == AAUDIO_MSG_TYPE_RESPONSE) { ++ aaudio_handle_reply(&a->bcem, msg); ++ } else if (header->type == AAUDIO_MSG_TYPE_COMMAND) { ++ aaudio_handle_command(a, msg); ++ } else if (header->type == AAUDIO_MSG_TYPE_NOTIFICATION) { ++ aaudio_handle_notification(a, msg); ++ } ++} ++ ++void aaudio_bce_in_queue_submit_pending(struct aaudio_bce_queue *q, size_t count) ++{ ++ struct bce_qe_submission *s; ++ while (count--) { ++ if (bce_reserve_submission(q->sq, NULL)) { ++ pr_err("aaudio: Failed to reserve an event queue submission\n"); ++ break; ++ } ++ s = bce_next_submission(q->sq); ++ bce_set_submission_single(s, q->dma_addr + (dma_addr_t) (q->data_tail * q->el_size), q->el_size); ++ q->data_tail = (q->data_tail + 1) % q->el_count; ++ } ++ bce_submit_to_device(q->sq); ++} ++ ++struct aaudio_msg aaudio_reply_alloc(void) ++{ ++ struct aaudio_msg ret; ++ ret.size = 
AAUDIO_BCE_QUEUE_ELEMENT_SIZE; ++ ret.data = kmalloc(ret.size, GFP_KERNEL); ++ return ret; ++} ++ ++void aaudio_reply_free(struct aaudio_msg *reply) ++{ ++ kfree(reply->data); ++} +diff --git a/drivers/staging/apple-bce/audio/protocol_bce.h b/drivers/staging/apple-bce/audio/protocol_bce.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/protocol_bce.h +@@ -0,0 +1,72 @@ ++#ifndef AAUDIO_PROTOCOL_BCE_H ++#define AAUDIO_PROTOCOL_BCE_H ++ ++#include "protocol.h" ++#include "../queue.h" ++ ++#define AAUDIO_BCE_QUEUE_ELEMENT_SIZE 0x1000 ++#define AAUDIO_BCE_QUEUE_ELEMENT_COUNT 20 ++ ++#define AAUDIO_BCE_QUEUE_TAG_COUNT 1000 ++ ++struct aaudio_device; ++ ++struct aaudio_bce_queue_entry { ++ struct aaudio_msg *msg; ++ struct completion *cmpl; ++}; ++struct aaudio_bce_queue { ++ struct bce_queue_cq *cq; ++ struct bce_queue_sq *sq; ++ void *data; ++ dma_addr_t dma_addr; ++ size_t data_head, data_tail; ++ size_t el_size, el_count; ++}; ++struct aaudio_bce { ++ struct bce_queue_cq *cq; ++ struct aaudio_bce_queue qin; ++ struct aaudio_bce_queue qout; ++ int tag_num; ++ struct aaudio_bce_queue_entry *pending_entries[AAUDIO_BCE_QUEUE_TAG_COUNT]; ++ struct spinlock spinlock; ++}; ++ ++struct aaudio_send_ctx { ++ int status; ++ int tag_n; ++ unsigned long irq_flags; ++ struct aaudio_msg msg; ++ unsigned long timeout; ++}; ++ ++int aaudio_bce_init(struct aaudio_device *dev); ++int __aaudio_send_prepare(struct aaudio_bce *b, struct aaudio_send_ctx *ctx, char *tag); ++void __aaudio_send(struct aaudio_bce *b, struct aaudio_send_ctx *ctx); ++int __aaudio_send_cmd_sync(struct aaudio_bce *b, struct aaudio_send_ctx *ctx, struct aaudio_msg *reply); ++ ++#define aaudio_send_with_tag(a, ctx, tag, tout, fn, ...) 
({ \ ++ (ctx)->timeout = msecs_to_jiffies(tout); \ ++ (ctx)->status = __aaudio_send_prepare(&(a)->bcem, (ctx), (tag)); \ ++ if (!(ctx)->status) { \ ++ fn(&(ctx)->msg, ##__VA_ARGS__); \ ++ __aaudio_send(&(a)->bcem, (ctx)); \ ++ } \ ++ (ctx)->status; \ ++}) ++#define aaudio_send(a, ctx, tout, fn, ...) aaudio_send_with_tag(a, ctx, NULL, tout, fn, ##__VA_ARGS__) ++ ++#define aaudio_send_cmd_sync(a, ctx, reply, tout, fn, ...) ({ \ ++ (ctx)->timeout = msecs_to_jiffies(tout); \ ++ (ctx)->status = __aaudio_send_prepare(&(a)->bcem, (ctx), NULL); \ ++ if (!(ctx)->status) { \ ++ fn(&(ctx)->msg, ##__VA_ARGS__); \ ++ (ctx)->status = __aaudio_send_cmd_sync(&(a)->bcem, (ctx), (reply)); \ ++ } \ ++ (ctx)->status; \ ++}) ++ ++struct aaudio_msg aaudio_reply_alloc(void); ++void aaudio_reply_free(struct aaudio_msg *reply); ++ ++#endif //AAUDIO_PROTOCOL_BCE_H +diff --git a/drivers/staging/apple-bce/mailbox.c b/drivers/staging/apple-bce/mailbox.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/mailbox.c +@@ -0,0 +1,155 @@ ++#include "mailbox.h" ++#include ++#include "apple_bce.h" ++#include ++ ++#define REG_MBOX_OUT_BASE 0x820 ++#define REG_MBOX_REPLY_COUNTER 0x108 ++#define REG_MBOX_REPLY_BASE 0x810 ++#define REG_TIMESTAMP_BASE 0xC000 ++ ++#define BCE_MBOX_TIMEOUT_MS 200 ++ ++void bce_mailbox_init(struct bce_mailbox *mb, void __iomem *reg_mb) ++{ ++ mb->reg_mb = reg_mb; ++ init_completion(&mb->mb_completion); ++} ++ ++int bce_mailbox_send(struct bce_mailbox *mb, u64 msg, u64* recv) ++{ ++ u32 __iomem *regb; ++ ++ if (atomic_cmpxchg(&mb->mb_status, 0, 1) != 0) { ++ return -EEXIST; // We don't support two messages at once ++ } ++ reinit_completion(&mb->mb_completion); ++ ++ pr_debug("bce_mailbox_send: %llx\n", msg); ++ regb = (u32*) ((u8*) mb->reg_mb + REG_MBOX_OUT_BASE); ++ iowrite32((u32) msg, regb); ++ iowrite32((u32) (msg >> 32), regb + 1); ++ iowrite32(0, regb + 2); ++ iowrite32(0, regb + 3); ++ ++ 
wait_for_completion_timeout(&mb->mb_completion, msecs_to_jiffies(BCE_MBOX_TIMEOUT_MS)); ++ if (atomic_read(&mb->mb_status) != 2) { // Didn't get the reply ++ atomic_set(&mb->mb_status, 0); ++ return -ETIMEDOUT; ++ } ++ ++ *recv = mb->mb_result; ++ pr_debug("bce_mailbox_send: reply %llx\n", *recv); ++ ++ atomic_set(&mb->mb_status, 0); ++ return 0; ++} ++ ++static int bce_mailbox_retrive_response(struct bce_mailbox *mb) ++{ ++ u32 __iomem *regb; ++ u32 lo, hi; ++ int count, counter; ++ u32 res = ioread32((u8*) mb->reg_mb + REG_MBOX_REPLY_COUNTER); ++ count = (res >> 20) & 0xf; ++ counter = count; ++ pr_debug("bce_mailbox_retrive_response count=%i\n", count); ++ while (counter--) { ++ regb = (u32*) ((u8*) mb->reg_mb + REG_MBOX_REPLY_BASE); ++ lo = ioread32(regb); ++ hi = ioread32(regb + 1); ++ ioread32(regb + 2); ++ ioread32(regb + 3); ++ pr_debug("bce_mailbox_retrive_response %llx\n", ((u64) hi << 32) | lo); ++ mb->mb_result = ((u64) hi << 32) | lo; ++ } ++ return count > 0 ? 0 : -ENODATA; ++} ++ ++int bce_mailbox_handle_interrupt(struct bce_mailbox *mb) ++{ ++ int status = bce_mailbox_retrive_response(mb); ++ if (!status) { ++ atomic_set(&mb->mb_status, 2); ++ complete(&mb->mb_completion); ++ } ++ return status; ++} ++ ++static void bc_send_timestamp(struct timer_list *tl); ++ ++void bce_timestamp_init(struct bce_timestamp *ts, void __iomem *reg) ++{ ++ u32 __iomem *regb; ++ ++ spin_lock_init(&ts->stop_sl); ++ ts->stopped = false; ++ ++ ts->reg = reg; ++ ++ regb = (u32*) ((u8*) ts->reg + REG_TIMESTAMP_BASE); ++ ++ ioread32(regb); ++ mb(); ++ ++ timer_setup(&ts->timer, bc_send_timestamp, 0); ++} ++ ++void bce_timestamp_start(struct bce_timestamp *ts, bool is_initial) ++{ ++ unsigned long flags; ++ u32 __iomem *regb = (u32*) ((u8*) ts->reg + REG_TIMESTAMP_BASE); ++ ++ if (is_initial) { ++ iowrite32((u32) -4, regb + 2); ++ iowrite32((u32) -1, regb); ++ } else { ++ iowrite32((u32) -3, regb + 2); ++ iowrite32((u32) -1, regb); ++ } ++ ++ spin_lock_irqsave(&ts->stop_sl, 
flags); ++ ts->stopped = false; ++ spin_unlock_irqrestore(&ts->stop_sl, flags); ++ mod_timer(&ts->timer, jiffies + msecs_to_jiffies(150)); ++} ++ ++void bce_timestamp_stop(struct bce_timestamp *ts) ++{ ++ unsigned long flags; ++ u32 __iomem *regb = (u32*) ((u8*) ts->reg + REG_TIMESTAMP_BASE); ++ ++ spin_lock_irqsave(&ts->stop_sl, flags); ++ ts->stopped = true; ++ spin_unlock_irqrestore(&ts->stop_sl, flags); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,15,0) ++ del_timer_sync(&ts->timer); ++#else ++ timer_delete_sync(&ts->timer); ++#endif ++ iowrite32((u32) -2, regb + 2); ++ iowrite32((u32) -1, regb); ++} ++ ++static void bc_send_timestamp(struct timer_list *tl) ++{ ++ struct bce_timestamp *ts; ++ unsigned long flags; ++ u32 __iomem *regb; ++ ktime_t bt; ++ ++ ts = container_of(tl, struct bce_timestamp, timer); ++ regb = (u32*) ((u8*) ts->reg + REG_TIMESTAMP_BASE); ++ local_irq_save(flags); ++ ioread32(regb + 2); ++ mb(); ++ bt = ktime_get_boottime(); ++ iowrite32((u32) bt, regb + 2); ++ iowrite32((u32) (bt >> 32), regb); ++ ++ spin_lock(&ts->stop_sl); ++ if (!ts->stopped) ++ mod_timer(&ts->timer, jiffies + msecs_to_jiffies(150)); ++ spin_unlock(&ts->stop_sl); ++ local_irq_restore(flags); ++} +diff --git a/drivers/staging/apple-bce/mailbox.h b/drivers/staging/apple-bce/mailbox.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/mailbox.h +@@ -0,0 +1,53 @@ ++#ifndef BCE_MAILBOX_H ++#define BCE_MAILBOX_H ++ ++#include ++#include ++#include ++ ++struct bce_mailbox { ++ void __iomem *reg_mb; ++ ++ atomic_t mb_status; // possible statuses: 0 (no msg), 1 (has active msg), 2 (got reply) ++ struct completion mb_completion; ++ uint64_t mb_result; ++}; ++ ++enum bce_message_type { ++ BCE_MB_REGISTER_COMMAND_SQ = 0x7, // to-device ++ BCE_MB_REGISTER_COMMAND_CQ = 0x8, // to-device ++ BCE_MB_REGISTER_COMMAND_QUEUE_REPLY = 0xB, // to-host ++ BCE_MB_SET_FW_PROTOCOL_VERSION = 0xC, // both ++ BCE_MB_SLEEP_NO_STATE = 0x14, // 
to-device ++ BCE_MB_RESTORE_NO_STATE = 0x15, // to-device ++ BCE_MB_SAVE_STATE_AND_SLEEP = 0x17, // to-device ++ BCE_MB_RESTORE_STATE_AND_WAKE = 0x18, // to-device ++ BCE_MB_SAVE_STATE_AND_SLEEP_FAILURE = 0x19, // from-device ++ BCE_MB_SAVE_RESTORE_STATE_COMPLETE = 0x1A, // from-device ++}; ++ ++#define BCE_MB_MSG(type, value) (((u64) (type) << 58) | ((value) & 0x3FFFFFFFFFFFFFFLL)) ++#define BCE_MB_TYPE(v) ((u32) (v >> 58)) ++#define BCE_MB_VALUE(v) (v & 0x3FFFFFFFFFFFFFFLL) ++ ++void bce_mailbox_init(struct bce_mailbox *mb, void __iomem *reg_mb); ++ ++int bce_mailbox_send(struct bce_mailbox *mb, u64 msg, u64* recv); ++ ++int bce_mailbox_handle_interrupt(struct bce_mailbox *mb); ++ ++ ++struct bce_timestamp { ++ void __iomem *reg; ++ struct timer_list timer; ++ struct spinlock stop_sl; ++ bool stopped; ++}; ++ ++void bce_timestamp_init(struct bce_timestamp *ts, void __iomem *reg); ++ ++void bce_timestamp_start(struct bce_timestamp *ts, bool is_initial); ++ ++void bce_timestamp_stop(struct bce_timestamp *ts); ++ ++#endif //BCEDRIVER_MAILBOX_H +diff --git a/drivers/staging/apple-bce/queue.c b/drivers/staging/apple-bce/queue.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/queue.c +@@ -0,0 +1,415 @@ ++#include "queue.h" ++#include "apple_bce.h" ++#include ++ ++#define REG_DOORBELL_BASE 0x44000 ++ ++struct bce_queue_cq *bce_alloc_cq(struct apple_bce_device *dev, int qid, u32 el_count) ++{ ++ struct bce_queue_cq *q; ++ q = kzalloc(sizeof(struct bce_queue_cq), GFP_KERNEL); ++ q->qid = qid; ++ q->type = BCE_QUEUE_CQ; ++ q->el_count = el_count; ++ q->data = dma_alloc_coherent(&dev->pci->dev, el_count * sizeof(struct bce_qe_completion), ++ &q->dma_handle, GFP_KERNEL); ++ if (!q->data) { ++ pr_err("DMA queue memory alloc failed\n"); ++ kfree(q); ++ return NULL; ++ } ++ return q; ++} ++ ++void bce_get_cq_memcfg(struct bce_queue_cq *cq, struct bce_queue_memcfg *cfg) ++{ ++ cfg->qid = (u16) cq->qid; ++ cfg->el_count = 
(u16) cq->el_count; ++ cfg->vector_or_cq = 0; ++ cfg->_pad = 0; ++ cfg->addr = cq->dma_handle; ++ cfg->length = cq->el_count * sizeof(struct bce_qe_completion); ++} ++ ++void bce_free_cq(struct apple_bce_device *dev, struct bce_queue_cq *cq) ++{ ++ dma_free_coherent(&dev->pci->dev, cq->el_count * sizeof(struct bce_qe_completion), cq->data, cq->dma_handle); ++ kfree(cq); ++} ++ ++static void bce_handle_cq_completion(struct apple_bce_device *dev, struct bce_qe_completion *e, size_t *ce) ++{ ++ struct bce_queue *target; ++ struct bce_queue_sq *target_sq; ++ struct bce_sq_completion_data *cmpl; ++ if (e->qid >= BCE_MAX_QUEUE_COUNT) { ++ pr_err("Device sent a response for qid (%u) >= BCE_MAX_QUEUE_COUNT\n", e->qid); ++ return; ++ } ++ target = dev->queues[e->qid]; ++ if (!target || target->type != BCE_QUEUE_SQ) { ++ pr_err("Device sent a response for qid (%u), which does not exist\n", e->qid); ++ return; ++ } ++ target_sq = (struct bce_queue_sq *) target; ++ if (target_sq->completion_tail != e->completion_index) { ++ pr_err("Completion index mismatch; this is likely going to make this driver unusable\n"); ++ return; ++ } ++ if (!target_sq->has_pending_completions) { ++ target_sq->has_pending_completions = true; ++ dev->int_sq_list[(*ce)++] = target_sq; ++ } ++ cmpl = &target_sq->completion_data[e->completion_index]; ++ cmpl->status = e->status; ++ cmpl->data_size = e->data_size; ++ cmpl->result = e->result; ++ wmb(); ++ target_sq->completion_tail = (target_sq->completion_tail + 1) % target_sq->el_count; ++} ++ ++void bce_handle_cq_completions(struct apple_bce_device *dev, struct bce_queue_cq *cq) ++{ ++ size_t ce = 0; ++ struct bce_qe_completion *e; ++ struct bce_queue_sq *sq; ++ e = bce_cq_element(cq, cq->index); ++ if (!(e->flags & BCE_COMPLETION_FLAG_PENDING)) ++ return; ++ mb(); ++ while (true) { ++ e = bce_cq_element(cq, cq->index); ++ if (!(e->flags & BCE_COMPLETION_FLAG_PENDING)) ++ break; ++ // pr_info("apple-bce: compl: %i: %i %llx %llx", e->qid, e->status, 
e->data_size, e->result); ++ bce_handle_cq_completion(dev, e, &ce); ++ e->flags = 0; ++ cq->index = (cq->index + 1) % cq->el_count; ++ } ++ mb(); ++ iowrite32(cq->index, (u32 *) ((u8 *) dev->reg_mem_dma + REG_DOORBELL_BASE) + cq->qid); ++ while (ce) { ++ --ce; ++ sq = dev->int_sq_list[ce]; ++ sq->completion(sq); ++ sq->has_pending_completions = false; ++ } ++} ++ ++ ++struct bce_queue_sq *bce_alloc_sq(struct apple_bce_device *dev, int qid, u32 el_size, u32 el_count, ++ bce_sq_completion compl, void *userdata) ++{ ++ struct bce_queue_sq *q; ++ q = kzalloc(sizeof(struct bce_queue_sq), GFP_KERNEL); ++ q->qid = qid; ++ q->type = BCE_QUEUE_SQ; ++ q->el_size = el_size; ++ q->el_count = el_count; ++ q->data = dma_alloc_coherent(&dev->pci->dev, el_count * el_size, ++ &q->dma_handle, GFP_KERNEL); ++ q->completion = compl; ++ q->userdata = userdata; ++ q->completion_data = kzalloc(sizeof(struct bce_sq_completion_data) * el_count, GFP_KERNEL); ++ q->reg_mem_dma = dev->reg_mem_dma; ++ atomic_set(&q->available_commands, el_count - 1); ++ init_completion(&q->available_command_completion); ++ atomic_set(&q->available_command_completion_waiting_count, 0); ++ if (!q->data) { ++ pr_err("DMA queue memory alloc failed\n"); ++ kfree(q); ++ return NULL; ++ } ++ return q; ++} ++ ++void bce_get_sq_memcfg(struct bce_queue_sq *sq, struct bce_queue_cq *cq, struct bce_queue_memcfg *cfg) ++{ ++ cfg->qid = (u16) sq->qid; ++ cfg->el_count = (u16) sq->el_count; ++ cfg->vector_or_cq = (u16) cq->qid; ++ cfg->_pad = 0; ++ cfg->addr = sq->dma_handle; ++ cfg->length = sq->el_count * sq->el_size; ++} ++ ++void bce_free_sq(struct apple_bce_device *dev, struct bce_queue_sq *sq) ++{ ++ dma_free_coherent(&dev->pci->dev, sq->el_count * sq->el_size, sq->data, sq->dma_handle); ++ kfree(sq); ++} ++ ++int bce_reserve_submission(struct bce_queue_sq *sq, unsigned long *timeout) ++{ ++ while (atomic_dec_if_positive(&sq->available_commands) < 0) { ++ if (!timeout || !*timeout) ++ return -EAGAIN; ++ 
atomic_inc(&sq->available_command_completion_waiting_count); ++ *timeout = wait_for_completion_timeout(&sq->available_command_completion, *timeout); ++ if (!*timeout) { ++ if (atomic_dec_if_positive(&sq->available_command_completion_waiting_count) < 0) ++ try_wait_for_completion(&sq->available_command_completion); /* consume the pending completion */ ++ } ++ } ++ return 0; ++} ++ ++void bce_cancel_submission_reservation(struct bce_queue_sq *sq) ++{ ++ atomic_inc(&sq->available_commands); ++} ++ ++void *bce_next_submission(struct bce_queue_sq *sq) ++{ ++ void *ret = bce_sq_element(sq, sq->tail); ++ sq->tail = (sq->tail + 1) % sq->el_count; ++ return ret; ++} ++ ++void bce_submit_to_device(struct bce_queue_sq *sq) ++{ ++ mb(); ++ iowrite32(sq->tail, (u32 *) ((u8 *) sq->reg_mem_dma + REG_DOORBELL_BASE) + sq->qid); ++} ++ ++void bce_notify_submission_complete(struct bce_queue_sq *sq) ++{ ++ sq->head = (sq->head + 1) % sq->el_count; ++ atomic_inc(&sq->available_commands); ++ if (atomic_dec_if_positive(&sq->available_command_completion_waiting_count) >= 0) { ++ complete(&sq->available_command_completion); ++ } ++} ++ ++void bce_set_submission_single(struct bce_qe_submission *element, dma_addr_t addr, size_t size) ++{ ++ element->addr = addr; ++ element->length = size; ++ element->segl_addr = element->segl_length = 0; ++} ++ ++static void bce_cmdq_completion(struct bce_queue_sq *q); ++ ++struct bce_queue_cmdq *bce_alloc_cmdq(struct apple_bce_device *dev, int qid, u32 el_count) ++{ ++ struct bce_queue_cmdq *q; ++ q = kzalloc(sizeof(struct bce_queue_cmdq), GFP_KERNEL); ++ q->sq = bce_alloc_sq(dev, qid, BCE_CMD_SIZE, el_count, bce_cmdq_completion, q); ++ if (!q->sq) { ++ kfree(q); ++ return NULL; ++ } ++ spin_lock_init(&q->lck); ++ q->tres = kzalloc(sizeof(struct bce_queue_cmdq_result_el*) * el_count, GFP_KERNEL); ++ if (!q->tres) { ++ kfree(q); ++ return NULL; ++ } ++ return q; ++} ++ ++void bce_free_cmdq(struct apple_bce_device *dev, struct bce_queue_cmdq *cmdq) ++{ ++ 
bce_free_sq(dev, cmdq->sq); ++ kfree(cmdq->tres); ++ kfree(cmdq); ++} ++ ++void bce_cmdq_completion(struct bce_queue_sq *q) ++{ ++ struct bce_queue_cmdq_result_el *el; ++ struct bce_queue_cmdq *cmdq = q->userdata; ++ struct bce_sq_completion_data *result; ++ ++ spin_lock(&cmdq->lck); ++ while ((result = bce_next_completion(q))) { ++ el = cmdq->tres[cmdq->sq->head]; ++ if (el) { ++ el->result = result->result; ++ el->status = result->status; ++ mb(); ++ complete(&el->cmpl); ++ } else { ++ pr_err("apple-bce: Unexpected command queue completion\n"); ++ } ++ cmdq->tres[cmdq->sq->head] = NULL; ++ bce_notify_submission_complete(q); ++ } ++ spin_unlock(&cmdq->lck); ++} ++ ++static __always_inline void *bce_cmd_start(struct bce_queue_cmdq *cmdq, struct bce_queue_cmdq_result_el *res) ++{ ++ void *ret; ++ unsigned long timeout; ++ init_completion(&res->cmpl); ++ mb(); ++ ++ timeout = msecs_to_jiffies(1000L * 60 * 5); /* wait for up to ~5 minutes */ ++ if (bce_reserve_submission(cmdq->sq, &timeout)) ++ return NULL; ++ ++ spin_lock(&cmdq->lck); ++ cmdq->tres[cmdq->sq->tail] = res; ++ ret = bce_next_submission(cmdq->sq); ++ return ret; ++} ++ ++static __always_inline void bce_cmd_finish(struct bce_queue_cmdq *cmdq, struct bce_queue_cmdq_result_el *res) ++{ ++ bce_submit_to_device(cmdq->sq); ++ spin_unlock(&cmdq->lck); ++ ++ wait_for_completion(&res->cmpl); ++ mb(); ++} ++ ++u32 bce_cmd_register_queue(struct bce_queue_cmdq *cmdq, struct bce_queue_memcfg *cfg, const char *name, bool isdirout) ++{ ++ struct bce_queue_cmdq_result_el res; ++ struct bce_cmdq_register_memory_queue_cmd *cmd = bce_cmd_start(cmdq, &res); ++ if (!cmd) ++ return (u32) -1; ++ cmd->cmd = BCE_CMD_REGISTER_MEMORY_QUEUE; ++ cmd->flags = (u16) ((name ? 2 : 0) | (isdirout ? 
1 : 0)); ++ cmd->qid = cfg->qid; ++ cmd->el_count = cfg->el_count; ++ cmd->vector_or_cq = cfg->vector_or_cq; ++ memset(cmd->name, 0, sizeof(cmd->name)); ++ if (name) { ++ cmd->name_len = (u16) min(strlen(name), (size_t) sizeof(cmd->name)); ++ memcpy(cmd->name, name, cmd->name_len); ++ } else { ++ cmd->name_len = 0; ++ } ++ cmd->addr = cfg->addr; ++ cmd->length = cfg->length; ++ ++ bce_cmd_finish(cmdq, &res); ++ return res.status; ++} ++ ++u32 bce_cmd_unregister_memory_queue(struct bce_queue_cmdq *cmdq, u16 qid) ++{ ++ struct bce_queue_cmdq_result_el res; ++ struct bce_cmdq_simple_memory_queue_cmd *cmd = bce_cmd_start(cmdq, &res); ++ if (!cmd) ++ return (u32) -1; ++ cmd->cmd = BCE_CMD_UNREGISTER_MEMORY_QUEUE; ++ cmd->flags = 0; ++ cmd->qid = qid; ++ bce_cmd_finish(cmdq, &res); ++ return res.status; ++} ++ ++u32 bce_cmd_flush_memory_queue(struct bce_queue_cmdq *cmdq, u16 qid) ++{ ++ struct bce_queue_cmdq_result_el res; ++ struct bce_cmdq_simple_memory_queue_cmd *cmd = bce_cmd_start(cmdq, &res); ++ if (!cmd) ++ return (u32) -1; ++ cmd->cmd = BCE_CMD_FLUSH_MEMORY_QUEUE; ++ cmd->flags = 0; ++ cmd->qid = qid; ++ bce_cmd_finish(cmdq, &res); ++ return res.status; ++} ++ ++ ++struct bce_queue_cq *bce_create_cq(struct apple_bce_device *dev, u32 el_count) ++{ ++ struct bce_queue_cq *cq; ++ struct bce_queue_memcfg cfg; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,18,0) ++ int qid = ida_simple_get(&dev->queue_ida, BCE_QUEUE_USER_MIN, BCE_QUEUE_USER_MAX, GFP_KERNEL); ++#else ++ int qid = ida_alloc_range(&dev->queue_ida, BCE_QUEUE_USER_MIN, BCE_QUEUE_USER_MAX - 1, GFP_KERNEL); ++#endif ++ if (qid < 0) ++ return NULL; ++ cq = bce_alloc_cq(dev, qid, el_count); ++ if (!cq) ++ return NULL; ++ bce_get_cq_memcfg(cq, &cfg); ++ if (bce_cmd_register_queue(dev->cmd_cmdq, &cfg, NULL, false) != 0) { ++ pr_err("apple-bce: CQ registration failed (%i)", qid); ++ bce_free_cq(dev, cq); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,18,0) ++ ida_simple_remove(&dev->queue_ida, (uint) qid); ++#else ++ 
ida_free(&dev->queue_ida, (uint) qid); ++#endif ++ return NULL; ++ } ++ dev->queues[qid] = (struct bce_queue *) cq; ++ return cq; ++} ++ ++struct bce_queue_sq *bce_create_sq(struct apple_bce_device *dev, struct bce_queue_cq *cq, const char *name, u32 el_count, ++ int direction, bce_sq_completion compl, void *userdata) ++{ ++ struct bce_queue_sq *sq; ++ struct bce_queue_memcfg cfg; ++ int qid; ++ if (cq == NULL) ++ return NULL; /* cq can not be null */ ++ if (name == NULL) ++ return NULL; /* name can not be null */ ++ if (direction != DMA_TO_DEVICE && direction != DMA_FROM_DEVICE) ++ return NULL; /* unsupported direction */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,18,0) ++ qid = ida_simple_get(&dev->queue_ida, BCE_QUEUE_USER_MIN, BCE_QUEUE_USER_MAX, GFP_KERNEL); ++#else ++ qid = ida_alloc_range(&dev->queue_ida, BCE_QUEUE_USER_MIN, BCE_QUEUE_USER_MAX - 1, GFP_KERNEL); ++#endif ++ if (qid < 0) ++ return NULL; ++ sq = bce_alloc_sq(dev, qid, sizeof(struct bce_qe_submission), el_count, compl, userdata); ++ if (!sq) ++ return NULL; ++ bce_get_sq_memcfg(sq, cq, &cfg); ++ if (bce_cmd_register_queue(dev->cmd_cmdq, &cfg, name, direction != DMA_FROM_DEVICE) != 0) { ++ pr_err("apple-bce: SQ registration failed (%i)", qid); ++ bce_free_sq(dev, sq); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,18,0) ++ ida_simple_remove(&dev->queue_ida, (uint) qid); ++#else ++ ida_free(&dev->queue_ida, (uint) qid); ++#endif ++ return NULL; ++ } ++ spin_lock(&dev->queues_lock); ++ dev->queues[qid] = (struct bce_queue *) sq; ++ spin_unlock(&dev->queues_lock); ++ return sq; ++} ++ ++void bce_destroy_cq(struct apple_bce_device *dev, struct bce_queue_cq *cq) ++{ ++ if (!dev->is_being_removed && bce_cmd_unregister_memory_queue(dev->cmd_cmdq, (u16) cq->qid)) ++ pr_err("apple-bce: CQ unregister failed"); ++ spin_lock(&dev->queues_lock); ++ dev->queues[cq->qid] = NULL; ++ spin_unlock(&dev->queues_lock); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,18,0) ++ ida_simple_remove(&dev->queue_ida, (uint) 
cq->qid); ++#else ++ ida_free(&dev->queue_ida, (uint) cq->qid); ++#endif ++ bce_free_cq(dev, cq); ++} ++ ++void bce_destroy_sq(struct apple_bce_device *dev, struct bce_queue_sq *sq) ++{ ++ if (!dev->is_being_removed && bce_cmd_unregister_memory_queue(dev->cmd_cmdq, (u16) sq->qid)) ++ pr_err("apple-bce: SQ unregister failed"); ++ spin_lock(&dev->queues_lock); ++ dev->queues[sq->qid] = NULL; ++ spin_unlock(&dev->queues_lock); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,18,0) ++ ida_simple_remove(&dev->queue_ida, (uint) sq->qid); ++#else ++ ida_free(&dev->queue_ida, (uint) sq->qid); ++#endif ++ bce_free_sq(dev, sq); ++} +\ No newline at end of file +diff --git a/drivers/staging/apple-bce/queue.h b/drivers/staging/apple-bce/queue.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/queue.h +@@ -0,0 +1,177 @@ ++#ifndef BCE_QUEUE_H ++#define BCE_QUEUE_H ++ ++#include ++#include ++ ++#define BCE_CMD_SIZE 0x40 ++ ++struct apple_bce_device; ++ ++enum bce_queue_type { ++ BCE_QUEUE_CQ, BCE_QUEUE_SQ ++}; ++struct bce_queue { ++ int qid; ++ int type; ++}; ++struct bce_queue_cq { ++ int qid; ++ int type; ++ u32 el_count; ++ dma_addr_t dma_handle; ++ void *data; ++ ++ u32 index; ++}; ++struct bce_queue_sq; ++typedef void (*bce_sq_completion)(struct bce_queue_sq *q); ++struct bce_sq_completion_data { ++ u32 status; ++ u64 data_size; ++ u64 result; ++}; ++struct bce_queue_sq { ++ int qid; ++ int type; ++ u32 el_size; ++ u32 el_count; ++ dma_addr_t dma_handle; ++ void *data; ++ void *userdata; ++ void __iomem *reg_mem_dma; ++ ++ atomic_t available_commands; ++ struct completion available_command_completion; ++ atomic_t available_command_completion_waiting_count; ++ u32 head, tail; ++ ++ u32 completion_cidx, completion_tail; ++ struct bce_sq_completion_data *completion_data; ++ bool has_pending_completions; ++ bce_sq_completion completion; ++}; ++ ++struct bce_queue_cmdq_result_el { ++ struct completion cmpl; ++ u32 status; ++ u64 
result; ++}; ++struct bce_queue_cmdq { ++ struct bce_queue_sq *sq; ++ struct spinlock lck; ++ struct bce_queue_cmdq_result_el **tres; ++}; ++ ++struct bce_queue_memcfg { ++ u16 qid; ++ u16 el_count; ++ u16 vector_or_cq; ++ u16 _pad; ++ u64 addr; ++ u64 length; ++}; ++ ++enum bce_qe_completion_status { ++ BCE_COMPLETION_SUCCESS = 0, ++ BCE_COMPLETION_ERROR = 1, ++ BCE_COMPLETION_ABORTED = 2, ++ BCE_COMPLETION_NO_SPACE = 3, ++ BCE_COMPLETION_OVERRUN = 4 ++}; ++enum bce_qe_completion_flags { ++ BCE_COMPLETION_FLAG_PENDING = 0x8000 ++}; ++struct bce_qe_completion { ++ u64 result; ++ u64 data_size; ++ u16 qid; ++ u16 completion_index; ++ u16 status; // bce_qe_completion_status ++ u16 flags; // bce_qe_completion_flags ++}; ++ ++struct bce_qe_submission { ++ u64 length; ++ u64 addr; ++ ++ u64 segl_addr; ++ u64 segl_length; ++}; ++ ++enum bce_cmdq_command { ++ BCE_CMD_REGISTER_MEMORY_QUEUE = 0x20, ++ BCE_CMD_UNREGISTER_MEMORY_QUEUE = 0x30, ++ BCE_CMD_FLUSH_MEMORY_QUEUE = 0x40, ++ BCE_CMD_SET_MEMORY_QUEUE_PROPERTY = 0x50 ++}; ++struct bce_cmdq_simple_memory_queue_cmd { ++ u16 cmd; // bce_cmdq_command ++ u16 flags; ++ u16 qid; ++}; ++struct bce_cmdq_register_memory_queue_cmd { ++ u16 cmd; // bce_cmdq_command ++ u16 flags; ++ u16 qid; ++ u16 _pad; ++ u16 el_count; ++ u16 vector_or_cq; ++ u16 _pad2; ++ u16 name_len; ++ char name[0x20]; ++ u64 addr; ++ u64 length; ++}; ++ ++static __always_inline void *bce_sq_element(struct bce_queue_sq *q, int i) { ++ return (void *) ((u8 *) q->data + q->el_size * i); ++} ++static __always_inline void *bce_cq_element(struct bce_queue_cq *q, int i) { ++ return (void *) ((struct bce_qe_completion *) q->data + i); ++} ++ ++static __always_inline struct bce_sq_completion_data *bce_next_completion(struct bce_queue_sq *sq) { ++ struct bce_sq_completion_data *res; ++ rmb(); ++ if (sq->completion_cidx == sq->completion_tail) ++ return NULL; ++ res = &sq->completion_data[sq->completion_cidx]; ++ sq->completion_cidx = (sq->completion_cidx + 1) % 
sq->el_count; ++ return res; ++} ++ ++struct bce_queue_cq *bce_alloc_cq(struct apple_bce_device *dev, int qid, u32 el_count); ++void bce_get_cq_memcfg(struct bce_queue_cq *cq, struct bce_queue_memcfg *cfg); ++void bce_free_cq(struct apple_bce_device *dev, struct bce_queue_cq *cq); ++void bce_handle_cq_completions(struct apple_bce_device *dev, struct bce_queue_cq *cq); ++ ++struct bce_queue_sq *bce_alloc_sq(struct apple_bce_device *dev, int qid, u32 el_size, u32 el_count, ++ bce_sq_completion compl, void *userdata); ++void bce_get_sq_memcfg(struct bce_queue_sq *sq, struct bce_queue_cq *cq, struct bce_queue_memcfg *cfg); ++void bce_free_sq(struct apple_bce_device *dev, struct bce_queue_sq *sq); ++int bce_reserve_submission(struct bce_queue_sq *sq, unsigned long *timeout); ++void bce_cancel_submission_reservation(struct bce_queue_sq *sq); ++void *bce_next_submission(struct bce_queue_sq *sq); ++void bce_submit_to_device(struct bce_queue_sq *sq); ++void bce_notify_submission_complete(struct bce_queue_sq *sq); ++ ++void bce_set_submission_single(struct bce_qe_submission *element, dma_addr_t addr, size_t size); ++ ++struct bce_queue_cmdq *bce_alloc_cmdq(struct apple_bce_device *dev, int qid, u32 el_count); ++void bce_free_cmdq(struct apple_bce_device *dev, struct bce_queue_cmdq *cmdq); ++ ++u32 bce_cmd_register_queue(struct bce_queue_cmdq *cmdq, struct bce_queue_memcfg *cfg, const char *name, bool isdirout); ++u32 bce_cmd_unregister_memory_queue(struct bce_queue_cmdq *cmdq, u16 qid); ++u32 bce_cmd_flush_memory_queue(struct bce_queue_cmdq *cmdq, u16 qid); ++ ++ ++/* User API - Creates and registers the queue */ ++ ++struct bce_queue_cq *bce_create_cq(struct apple_bce_device *dev, u32 el_count); ++struct bce_queue_sq *bce_create_sq(struct apple_bce_device *dev, struct bce_queue_cq *cq, const char *name, u32 el_count, ++ int direction, bce_sq_completion compl, void *userdata); ++void bce_destroy_cq(struct apple_bce_device *dev, struct bce_queue_cq *cq); ++void 
bce_destroy_sq(struct apple_bce_device *dev, struct bce_queue_sq *sq); ++ ++#endif //BCEDRIVER_MAILBOX_H +diff --git a/drivers/staging/apple-bce/queue_dma.c b/drivers/staging/apple-bce/queue_dma.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/queue_dma.c +@@ -0,0 +1,220 @@ ++#include "queue_dma.h" ++#include ++#include ++#include "queue.h" ++ ++static int bce_alloc_scatterlist_from_vm(struct sg_table *tbl, void *data, size_t len); ++static struct bce_segment_list_element_hostinfo *bce_map_segment_list( ++ struct device *dev, struct scatterlist *pages, int pagen); ++static void bce_unmap_segement_list(struct device *dev, struct bce_segment_list_element_hostinfo *list); ++ ++int bce_map_dma_buffer(struct device *dev, struct bce_dma_buffer *buf, struct sg_table scatterlist, ++ enum dma_data_direction dir) ++{ ++ int cnt; ++ ++ buf->direction = dir; ++ buf->scatterlist = scatterlist; ++ buf->seglist_hostinfo = NULL; ++ ++ cnt = dma_map_sg(dev, buf->scatterlist.sgl, buf->scatterlist.nents, dir); ++ if (cnt != buf->scatterlist.nents) { ++ pr_err("apple-bce: DMA scatter list mapping returned an unexpected count: %i\n", cnt); ++ dma_unmap_sg(dev, buf->scatterlist.sgl, buf->scatterlist.nents, dir); ++ return -EIO; ++ } ++ if (cnt == 1) ++ return 0; ++ ++ buf->seglist_hostinfo = bce_map_segment_list(dev, buf->scatterlist.sgl, buf->scatterlist.nents); ++ if (!buf->seglist_hostinfo) { ++ pr_err("apple-bce: Creating segment list failed\n"); ++ dma_unmap_sg(dev, buf->scatterlist.sgl, buf->scatterlist.nents, dir); ++ return -EIO; ++ } ++ return 0; ++} ++ ++int bce_map_dma_buffer_vm(struct device *dev, struct bce_dma_buffer *buf, void *data, size_t len, ++ enum dma_data_direction dir) ++{ ++ int status; ++ struct sg_table scatterlist; ++ if ((status = bce_alloc_scatterlist_from_vm(&scatterlist, data, len))) ++ return status; ++ if ((status = bce_map_dma_buffer(dev, buf, scatterlist, dir))) { ++ sg_free_table(&scatterlist); 
++ return status; ++ } ++ return 0; ++} ++ ++int bce_map_dma_buffer_km(struct device *dev, struct bce_dma_buffer *buf, void *data, size_t len, ++ enum dma_data_direction dir) ++{ ++ /* Kernel memory is continuous which is great for us. */ ++ int status; ++ struct sg_table scatterlist; ++ if ((status = sg_alloc_table(&scatterlist, 1, GFP_KERNEL))) { ++ sg_free_table(&scatterlist); ++ return status; ++ } ++ sg_set_buf(scatterlist.sgl, data, (uint) len); ++ if ((status = bce_map_dma_buffer(dev, buf, scatterlist, dir))) { ++ sg_free_table(&scatterlist); ++ return status; ++ } ++ return 0; ++} ++ ++void bce_unmap_dma_buffer(struct device *dev, struct bce_dma_buffer *buf) ++{ ++ dma_unmap_sg(dev, buf->scatterlist.sgl, buf->scatterlist.nents, buf->direction); ++ bce_unmap_segement_list(dev, buf->seglist_hostinfo); ++} ++ ++ ++static int bce_alloc_scatterlist_from_vm(struct sg_table *tbl, void *data, size_t len) ++{ ++ int status, i; ++ struct page **pages; ++ size_t off, start_page, end_page, page_count; ++ off = (size_t) data % PAGE_SIZE; ++ start_page = (size_t) data / PAGE_SIZE; ++ end_page = ((size_t) data + len - 1) / PAGE_SIZE; ++ page_count = end_page - start_page + 1; ++ ++ if (page_count > PAGE_SIZE / sizeof(struct page *)) ++ pages = vmalloc(page_count * sizeof(struct page *)); ++ else ++ pages = kmalloc(page_count * sizeof(struct page *), GFP_KERNEL); ++ ++ for (i = 0; i < page_count; i++) ++ pages[i] = vmalloc_to_page((void *) ((start_page + i) * PAGE_SIZE)); ++ ++ if ((status = sg_alloc_table_from_pages(tbl, pages, page_count, (unsigned int) off, len, GFP_KERNEL))) { ++ sg_free_table(tbl); ++ } ++ ++ if (page_count > PAGE_SIZE / sizeof(struct page *)) ++ vfree(pages); ++ else ++ kfree(pages); ++ return status; ++} ++ ++#define BCE_ELEMENTS_PER_PAGE ((PAGE_SIZE - sizeof(struct bce_segment_list_header)) \ ++ / sizeof(struct bce_segment_list_element)) ++#define BCE_ELEMENTS_PER_ADDITIONAL_PAGE (PAGE_SIZE / sizeof(struct bce_segment_list_element)) ++ ++static 
struct bce_segment_list_element_hostinfo *bce_map_segment_list( ++ struct device *dev, struct scatterlist *pages, int pagen) ++{ ++ size_t ptr, pptr = 0; ++ struct bce_segment_list_header theader; /* a temp header, to store the initial seg */ ++ struct bce_segment_list_header *header; ++ struct bce_segment_list_element *el, *el_end; ++ struct bce_segment_list_element_hostinfo *out, *pout, *out_root; ++ struct scatterlist *sg; ++ int i; ++ header = &theader; ++ out = out_root = NULL; ++ el = el_end = NULL; ++ for_each_sg(pages, sg, pagen, i) { ++ if (el >= el_end) { ++ /* allocate a new page, this will be also done for the first element */ ++ ptr = __get_free_page(GFP_KERNEL); ++ if (pptr && ptr == pptr + PAGE_SIZE) { ++ out->page_count++; ++ header->element_count += BCE_ELEMENTS_PER_ADDITIONAL_PAGE; ++ el_end += BCE_ELEMENTS_PER_ADDITIONAL_PAGE; ++ } else { ++ header = (void *) ptr; ++ header->element_count = BCE_ELEMENTS_PER_PAGE; ++ header->data_size = 0; ++ header->next_segl_addr = 0; ++ header->next_segl_length = 0; ++ el = (void *) (header + 1); ++ el_end = el + BCE_ELEMENTS_PER_PAGE; ++ ++ if (out) { ++ out->next = kmalloc(sizeof(struct bce_segment_list_element_hostinfo), GFP_KERNEL); ++ out = out->next; ++ } else { ++ out_root = out = kmalloc(sizeof(struct bce_segment_list_element_hostinfo), GFP_KERNEL); ++ } ++ out->page_start = (void *) ptr; ++ out->page_count = 1; ++ out->dma_start = DMA_MAPPING_ERROR; ++ out->next = NULL; ++ } ++ pptr = ptr; ++ } ++ el->addr = sg->dma_address; ++ el->length = sg->length; ++ header->data_size += el->length; ++ } ++ ++ /* DMA map */ ++ out = out_root; ++ pout = NULL; ++ while (out) { ++ out->dma_start = dma_map_single(dev, out->page_start, out->page_count * PAGE_SIZE, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, out->dma_start)) ++ goto error; ++ if (pout) { ++ header = pout->page_start; ++ header->next_segl_addr = out->dma_start; ++ header->next_segl_length = out->page_count * PAGE_SIZE; ++ } ++ pout = out; ++ out = 
out->next; ++ } ++ return out_root; ++ ++ error: ++ bce_unmap_segement_list(dev, out_root); ++ return NULL; ++} ++ ++static void bce_unmap_segement_list(struct device *dev, struct bce_segment_list_element_hostinfo *list) ++{ ++ struct bce_segment_list_element_hostinfo *next; ++ while (list) { ++ if (list->dma_start != DMA_MAPPING_ERROR) ++ dma_unmap_single(dev, list->dma_start, list->page_count * PAGE_SIZE, DMA_TO_DEVICE); ++ next = list->next; ++ kfree(list); ++ list = next; ++ } ++} ++ ++int bce_set_submission_buf(struct bce_qe_submission *element, struct bce_dma_buffer *buf, size_t offset, size_t length) ++{ ++ struct bce_segment_list_element_hostinfo *seg; ++ struct bce_segment_list_header *seg_header; ++ ++ seg = buf->seglist_hostinfo; ++ if (!seg) { ++ element->addr = buf->scatterlist.sgl->dma_address + offset; ++ element->length = length; ++ element->segl_addr = 0; ++ element->segl_length = 0; ++ return 0; ++ } ++ ++ while (seg) { ++ seg_header = seg->page_start; ++ if (offset <= seg_header->data_size) ++ break; ++ offset -= seg_header->data_size; ++ seg = seg->next; ++ } ++ if (!seg) ++ return -EINVAL; ++ element->addr = offset; ++ element->length = buf->scatterlist.sgl->dma_length; ++ element->segl_addr = seg->dma_start; ++ element->segl_length = seg->page_count * PAGE_SIZE; ++ return 0; ++} +\ No newline at end of file +diff --git a/drivers/staging/apple-bce/queue_dma.h b/drivers/staging/apple-bce/queue_dma.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/queue_dma.h +@@ -0,0 +1,50 @@ ++#ifndef BCE_QUEUE_DMA_H ++#define BCE_QUEUE_DMA_H ++ ++#include ++ ++struct bce_qe_submission; ++ ++struct bce_segment_list_header { ++ u64 element_count; ++ u64 data_size; ++ ++ u64 next_segl_addr; ++ u64 next_segl_length; ++}; ++struct bce_segment_list_element { ++ u64 addr; ++ u64 length; ++}; ++ ++struct bce_segment_list_element_hostinfo { ++ struct bce_segment_list_element_hostinfo *next; ++ void *page_start; ++ 
size_t page_count; ++ dma_addr_t dma_start; ++}; ++ ++ ++struct bce_dma_buffer { ++ enum dma_data_direction direction; ++ struct sg_table scatterlist; ++ struct bce_segment_list_element_hostinfo *seglist_hostinfo; ++}; ++ ++/* NOTE: Takes ownership of the sg_table if it succeeds. Ownership is not transferred on failure. */ ++int bce_map_dma_buffer(struct device *dev, struct bce_dma_buffer *buf, struct sg_table scatterlist, ++ enum dma_data_direction dir); ++ ++/* Creates a buffer from virtual memory (vmalloc) */ ++int bce_map_dma_buffer_vm(struct device *dev, struct bce_dma_buffer *buf, void *data, size_t len, ++ enum dma_data_direction dir); ++ ++/* Creates a buffer from kernel memory (kmalloc) */ ++int bce_map_dma_buffer_km(struct device *dev, struct bce_dma_buffer *buf, void *data, size_t len, ++ enum dma_data_direction dir); ++ ++void bce_unmap_dma_buffer(struct device *dev, struct bce_dma_buffer *buf); ++ ++int bce_set_submission_buf(struct bce_qe_submission *element, struct bce_dma_buffer *buf, size_t offset, size_t length); ++ ++#endif //BCE_QUEUE_DMA_H +diff --git a/drivers/staging/apple-bce/vhci/command.h b/drivers/staging/apple-bce/vhci/command.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/vhci/command.h +@@ -0,0 +1,204 @@ ++#ifndef BCE_VHCI_COMMAND_H ++#define BCE_VHCI_COMMAND_H ++ ++#include "queue.h" ++#include ++#include ++ ++#define BCE_VHCI_CMD_TIMEOUT_SHORT msecs_to_jiffies(2000) ++#define BCE_VHCI_CMD_TIMEOUT_LONG msecs_to_jiffies(30000) ++ ++#define BCE_VHCI_BULK_MAX_ACTIVE_URBS_POW2 2 ++#define BCE_VHCI_BULK_MAX_ACTIVE_URBS (1 << BCE_VHCI_BULK_MAX_ACTIVE_URBS_POW2) ++ ++typedef u8 bce_vhci_port_t; ++typedef u8 bce_vhci_device_t; ++ ++enum bce_vhci_command { ++ BCE_VHCI_CMD_CONTROLLER_ENABLE = 1, ++ BCE_VHCI_CMD_CONTROLLER_DISABLE = 2, ++ BCE_VHCI_CMD_CONTROLLER_START = 3, ++ BCE_VHCI_CMD_CONTROLLER_PAUSE = 4, ++ ++ BCE_VHCI_CMD_PORT_POWER_ON = 0x10, ++ BCE_VHCI_CMD_PORT_POWER_OFF = 
0x11, ++ BCE_VHCI_CMD_PORT_RESUME = 0x12, ++ BCE_VHCI_CMD_PORT_SUSPEND = 0x13, ++ BCE_VHCI_CMD_PORT_RESET = 0x14, ++ BCE_VHCI_CMD_PORT_DISABLE = 0x15, ++ BCE_VHCI_CMD_PORT_STATUS = 0x16, ++ ++ BCE_VHCI_CMD_DEVICE_CREATE = 0x30, ++ BCE_VHCI_CMD_DEVICE_DESTROY = 0x31, ++ ++ BCE_VHCI_CMD_ENDPOINT_CREATE = 0x40, ++ BCE_VHCI_CMD_ENDPOINT_DESTROY = 0x41, ++ BCE_VHCI_CMD_ENDPOINT_SET_STATE = 0x42, ++ BCE_VHCI_CMD_ENDPOINT_RESET = 0x44, ++ ++ /* Device to host only */ ++ BCE_VHCI_CMD_ENDPOINT_REQUEST_STATE = 0x43, ++ BCE_VHCI_CMD_TRANSFER_REQUEST = 0x1000, ++ BCE_VHCI_CMD_CONTROL_TRANSFER_STATUS = 0x1005 ++}; ++ ++enum bce_vhci_endpoint_state { ++ BCE_VHCI_ENDPOINT_ACTIVE = 0, ++ BCE_VHCI_ENDPOINT_PAUSED = 1, ++ BCE_VHCI_ENDPOINT_STALLED = 2 ++}; ++ ++static inline int bce_vhci_cmd_controller_enable(struct bce_vhci_command_queue *q, u8 busNum, u16 *portMask) ++{ ++ int status; ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_CONTROLLER_ENABLE; ++ cmd.param1 = 0x7100u | busNum; ++ status = bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_LONG); ++ if (!status) ++ *portMask = (u16) res.param2; ++ return status; ++} ++static inline int bce_vhci_cmd_controller_disable(struct bce_vhci_command_queue *q) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_CONTROLLER_DISABLE; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_LONG); ++} ++static inline int bce_vhci_cmd_controller_start(struct bce_vhci_command_queue *q) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_CONTROLLER_START; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_LONG); ++} ++static inline int bce_vhci_cmd_controller_pause(struct bce_vhci_command_queue *q) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_CONTROLLER_PAUSE; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_LONG); ++} ++ ++static inline int bce_vhci_cmd_port_power_on(struct 
bce_vhci_command_queue *q, bce_vhci_port_t port) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_POWER_ON; ++ cmd.param1 = port; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} ++static inline int bce_vhci_cmd_port_power_off(struct bce_vhci_command_queue *q, bce_vhci_port_t port) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_POWER_OFF; ++ cmd.param1 = port; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} ++static inline int bce_vhci_cmd_port_resume(struct bce_vhci_command_queue *q, bce_vhci_port_t port) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_RESUME; ++ cmd.param1 = port; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_LONG); ++} ++static inline int bce_vhci_cmd_port_suspend(struct bce_vhci_command_queue *q, bce_vhci_port_t port) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_SUSPEND; ++ cmd.param1 = port; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_LONG); ++} ++static inline int bce_vhci_cmd_port_reset(struct bce_vhci_command_queue *q, bce_vhci_port_t port, u32 timeout) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_RESET; ++ cmd.param1 = port; ++ cmd.param2 = timeout; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} ++static inline int bce_vhci_cmd_port_disable(struct bce_vhci_command_queue *q, bce_vhci_port_t port) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_DISABLE; ++ cmd.param1 = port; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} ++static inline int bce_vhci_cmd_port_status(struct bce_vhci_command_queue *q, bce_vhci_port_t port, ++ u32 clearFlags, u32 *resStatus) ++{ ++ int status; ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_STATUS; ++ cmd.param1 = port; 
++ cmd.param2 = clearFlags & 0x560000; ++ status = bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++ if (status >= 0) ++ *resStatus = (u32) res.param2; ++ return status; ++} ++ ++static inline int bce_vhci_cmd_device_create(struct bce_vhci_command_queue *q, bce_vhci_port_t port, ++ bce_vhci_device_t *dev) ++{ ++ int status; ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_DEVICE_CREATE; ++ cmd.param1 = port; ++ status = bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++ if (!status) ++ *dev = (bce_vhci_device_t) res.param2; ++ return status; ++} ++static inline int bce_vhci_cmd_device_destroy(struct bce_vhci_command_queue *q, bce_vhci_device_t dev) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_DEVICE_DESTROY; ++ cmd.param1 = dev; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_LONG); ++} ++ ++static inline int bce_vhci_cmd_endpoint_create(struct bce_vhci_command_queue *q, bce_vhci_device_t dev, ++ struct usb_endpoint_descriptor *desc) ++{ ++ struct bce_vhci_message cmd, res; ++ int endpoint_type = usb_endpoint_type(desc); ++ int maxp = usb_endpoint_maxp(desc); ++ int maxp_burst = usb_endpoint_maxp_mult(desc) * maxp; ++ u8 max_active_requests_pow2 = 0; ++ cmd.cmd = BCE_VHCI_CMD_ENDPOINT_CREATE; ++ cmd.param1 = dev | ((desc->bEndpointAddress & 0x8Fu) << 8); ++ if (endpoint_type == USB_ENDPOINT_XFER_BULK) ++ max_active_requests_pow2 = BCE_VHCI_BULK_MAX_ACTIVE_URBS_POW2; ++ cmd.param2 = endpoint_type | ((max_active_requests_pow2 & 0xf) << 4) | (maxp << 16) | ((u64) maxp_burst << 32); ++ if (endpoint_type == USB_ENDPOINT_XFER_INT) ++ cmd.param2 |= (desc->bInterval - 1) << 8; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} ++static inline int bce_vhci_cmd_endpoint_destroy(struct bce_vhci_command_queue *q, bce_vhci_device_t dev, u8 endpoint) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = 
BCE_VHCI_CMD_ENDPOINT_DESTROY; ++ cmd.param1 = dev | (endpoint << 8); ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} ++static inline int bce_vhci_cmd_endpoint_set_state(struct bce_vhci_command_queue *q, bce_vhci_device_t dev, u8 endpoint, ++ enum bce_vhci_endpoint_state newState, enum bce_vhci_endpoint_state *retState) ++{ ++ int status; ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_ENDPOINT_SET_STATE; ++ cmd.param1 = dev | (endpoint << 8); ++ cmd.param2 = (u64) newState; ++ status = bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++ if (status != BCE_VHCI_INTERNAL_ERROR && status != BCE_VHCI_NO_POWER) ++ *retState = (enum bce_vhci_endpoint_state) res.param2; ++ return status; ++} ++static inline int bce_vhci_cmd_endpoint_reset(struct bce_vhci_command_queue *q, bce_vhci_device_t dev, u8 endpoint) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_ENDPOINT_RESET; ++ cmd.param1 = dev | (endpoint << 8); ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} ++ ++ ++#endif //BCE_VHCI_COMMAND_H +diff --git a/drivers/staging/apple-bce/vhci/queue.c b/drivers/staging/apple-bce/vhci/queue.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/vhci/queue.c +@@ -0,0 +1,268 @@ ++#include "queue.h" ++#include "vhci.h" ++#include "../apple_bce.h" ++ ++ ++static void bce_vhci_message_queue_completion(struct bce_queue_sq *sq); ++ ++int bce_vhci_message_queue_create(struct bce_vhci *vhci, struct bce_vhci_message_queue *ret, const char *name) ++{ ++ int status; ++ ret->cq = bce_create_cq(vhci->dev, VHCI_EVENT_QUEUE_EL_COUNT); ++ if (!ret->cq) ++ return -EINVAL; ++ ret->sq = bce_create_sq(vhci->dev, ret->cq, name, VHCI_EVENT_QUEUE_EL_COUNT, DMA_TO_DEVICE, ++ bce_vhci_message_queue_completion, ret); ++ if (!ret->sq) { ++ status = -EINVAL; ++ goto fail_cq; ++ } ++ ret->data = 
dma_alloc_coherent(&vhci->dev->pci->dev, sizeof(struct bce_vhci_message) * VHCI_EVENT_QUEUE_EL_COUNT, ++ &ret->dma_addr, GFP_KERNEL); ++ if (!ret->data) { ++ status = -EINVAL; ++ goto fail_sq; ++ } ++ return 0; ++ ++fail_sq: ++ bce_destroy_sq(vhci->dev, ret->sq); ++ ret->sq = NULL; ++fail_cq: ++ bce_destroy_cq(vhci->dev, ret->cq); ++ ret->cq = NULL; ++ return status; ++} ++ ++void bce_vhci_message_queue_destroy(struct bce_vhci *vhci, struct bce_vhci_message_queue *q) ++{ ++ if (!q->cq) ++ return; ++ dma_free_coherent(&vhci->dev->pci->dev, sizeof(struct bce_vhci_message) * VHCI_EVENT_QUEUE_EL_COUNT, ++ q->data, q->dma_addr); ++ bce_destroy_sq(vhci->dev, q->sq); ++ bce_destroy_cq(vhci->dev, q->cq); ++} ++ ++void bce_vhci_message_queue_write(struct bce_vhci_message_queue *q, struct bce_vhci_message *req) ++{ ++ int sidx; ++ struct bce_qe_submission *s; ++ sidx = q->sq->tail; ++ s = bce_next_submission(q->sq); ++ pr_debug("bce-vhci: Send message: %x s=%x p1=%x p2=%llx\n", req->cmd, req->status, req->param1, req->param2); ++ q->data[sidx] = *req; ++ bce_set_submission_single(s, q->dma_addr + sizeof(struct bce_vhci_message) * sidx, ++ sizeof(struct bce_vhci_message)); ++ bce_submit_to_device(q->sq); ++} ++ ++static void bce_vhci_message_queue_completion(struct bce_queue_sq *sq) ++{ ++ while (bce_next_completion(sq)) ++ bce_notify_submission_complete(sq); ++} ++ ++ ++ ++static void bce_vhci_event_queue_completion(struct bce_queue_sq *sq); ++ ++int __bce_vhci_event_queue_create(struct bce_vhci *vhci, struct bce_vhci_event_queue *ret, const char *name, ++ bce_sq_completion compl) ++{ ++ ret->vhci = vhci; ++ ++ ret->sq = bce_create_sq(vhci->dev, vhci->ev_cq, name, VHCI_EVENT_QUEUE_EL_COUNT, DMA_FROM_DEVICE, compl, ret); ++ if (!ret->sq) ++ return -EINVAL; ++ ret->data = dma_alloc_coherent(&vhci->dev->pci->dev, sizeof(struct bce_vhci_message) * VHCI_EVENT_QUEUE_EL_COUNT, ++ &ret->dma_addr, GFP_KERNEL); ++ if (!ret->data) { ++ bce_destroy_sq(vhci->dev, ret->sq); ++ ret->sq = 
NULL; ++ return -EINVAL; ++ } ++ ++ init_completion(&ret->queue_empty_completion); ++ bce_vhci_event_queue_submit_pending(ret, VHCI_EVENT_PENDING_COUNT); ++ return 0; ++} ++ ++int bce_vhci_event_queue_create(struct bce_vhci *vhci, struct bce_vhci_event_queue *ret, const char *name, ++ bce_vhci_event_queue_callback cb) ++{ ++ ret->cb = cb; ++ return __bce_vhci_event_queue_create(vhci, ret, name, bce_vhci_event_queue_completion); ++} ++ ++void bce_vhci_event_queue_destroy(struct bce_vhci *vhci, struct bce_vhci_event_queue *q) ++{ ++ if (!q->sq) ++ return; ++ dma_free_coherent(&vhci->dev->pci->dev, sizeof(struct bce_vhci_message) * VHCI_EVENT_QUEUE_EL_COUNT, ++ q->data, q->dma_addr); ++ bce_destroy_sq(vhci->dev, q->sq); ++} ++ ++static void bce_vhci_event_queue_completion(struct bce_queue_sq *sq) ++{ ++ struct bce_sq_completion_data *cd; ++ struct bce_vhci_event_queue *ev = sq->userdata; ++ struct bce_vhci_message *msg; ++ size_t cnt = 0; ++ ++ while ((cd = bce_next_completion(sq))) { ++ if (cd->status == BCE_COMPLETION_ABORTED) { /* We flushed the queue */ ++ bce_notify_submission_complete(sq); ++ continue; ++ } ++ msg = &ev->data[sq->head]; ++ pr_debug("bce-vhci: Got event: %x s=%x p1=%x p2=%llx\n", msg->cmd, msg->status, msg->param1, msg->param2); ++ ev->cb(ev, msg); ++ ++ bce_notify_submission_complete(sq); ++ ++cnt; ++ } ++ bce_vhci_event_queue_submit_pending(ev, cnt); ++ if (atomic_read(&sq->available_commands) == sq->el_count - 1) ++ complete(&ev->queue_empty_completion); ++} ++ ++void bce_vhci_event_queue_submit_pending(struct bce_vhci_event_queue *q, size_t count) ++{ ++ int idx; ++ struct bce_qe_submission *s; ++ while (count--) { ++ if (bce_reserve_submission(q->sq, NULL)) { ++ pr_err("bce-vhci: Failed to reserve an event queue submission\n"); ++ break; ++ } ++ idx = q->sq->tail; ++ s = bce_next_submission(q->sq); ++ bce_set_submission_single(s, ++ q->dma_addr + idx * sizeof(struct bce_vhci_message), sizeof(struct bce_vhci_message)); ++ } ++ 
bce_submit_to_device(q->sq); ++} ++ ++void bce_vhci_event_queue_pause(struct bce_vhci_event_queue *q) ++{ ++ unsigned long timeout; ++ reinit_completion(&q->queue_empty_completion); ++ if (bce_cmd_flush_memory_queue(q->vhci->dev->cmd_cmdq, q->sq->qid)) ++ pr_warn("bce-vhci: failed to flush event queue\n"); ++ timeout = msecs_to_jiffies(5000); ++ while (atomic_read(&q->sq->available_commands) != q->sq->el_count - 1) { ++ timeout = wait_for_completion_timeout(&q->queue_empty_completion, timeout); ++ if (timeout == 0) { ++ pr_err("bce-vhci: waiting for queue to be flushed timed out\n"); ++ break; ++ } ++ } ++} ++ ++void bce_vhci_event_queue_resume(struct bce_vhci_event_queue *q) ++{ ++ if (atomic_read(&q->sq->available_commands) != q->sq->el_count - 1) { ++ pr_err("bce-vhci: resume of a queue with pending submissions\n"); ++ return; ++ } ++ bce_vhci_event_queue_submit_pending(q, VHCI_EVENT_PENDING_COUNT); ++} ++ ++void bce_vhci_command_queue_create(struct bce_vhci_command_queue *ret, struct bce_vhci_message_queue *mq) ++{ ++ ret->mq = mq; ++ ret->completion.result = NULL; ++ init_completion(&ret->completion.completion); ++ spin_lock_init(&ret->completion_lock); ++ mutex_init(&ret->mutex); ++} ++ ++void bce_vhci_command_queue_destroy(struct bce_vhci_command_queue *cq) ++{ ++ spin_lock(&cq->completion_lock); ++ if (cq->completion.result) { ++ memset(cq->completion.result, 0, sizeof(struct bce_vhci_message)); ++ cq->completion.result->status = BCE_VHCI_ABORT; ++ complete(&cq->completion.completion); ++ cq->completion.result = NULL; ++ } ++ spin_unlock(&cq->completion_lock); ++ mutex_lock(&cq->mutex); ++ mutex_unlock(&cq->mutex); ++ mutex_destroy(&cq->mutex); ++} ++ ++void bce_vhci_command_queue_deliver_completion(struct bce_vhci_command_queue *cq, struct bce_vhci_message *msg) ++{ ++ struct bce_vhci_command_queue_completion *c = &cq->completion; ++ ++ spin_lock(&cq->completion_lock); ++ if (c->result) { ++ *c->result = *msg; ++ complete(&c->completion); ++ c->result = 
NULL; ++ } ++ spin_unlock(&cq->completion_lock); ++} ++ ++static int __bce_vhci_command_queue_execute(struct bce_vhci_command_queue *cq, struct bce_vhci_message *req, ++ struct bce_vhci_message *res, unsigned long timeout) ++{ ++ int status; ++ struct bce_vhci_command_queue_completion *c; ++ struct bce_vhci_message creq; ++ c = &cq->completion; ++ ++ if ((status = bce_reserve_submission(cq->mq->sq, &timeout))) ++ return status; ++ ++ spin_lock(&cq->completion_lock); ++ c->result = res; ++ reinit_completion(&c->completion); ++ spin_unlock(&cq->completion_lock); ++ ++ bce_vhci_message_queue_write(cq->mq, req); ++ ++ if (!wait_for_completion_timeout(&c->completion, timeout)) { ++ /* we ran out of time, send cancellation */ ++ pr_debug("bce-vhci: command timed out req=%x\n", req->cmd); ++ if ((status = bce_reserve_submission(cq->mq->sq, &timeout))) ++ return status; ++ ++ creq = *req; ++ creq.cmd |= 0x4000; ++ bce_vhci_message_queue_write(cq->mq, &creq); ++ ++ if (!wait_for_completion_timeout(&c->completion, 1000)) { ++ pr_err("bce-vhci: Possible desync, cmd cancel timed out\n"); ++ ++ spin_lock(&cq->completion_lock); ++ c->result = NULL; ++ spin_unlock(&cq->completion_lock); ++ return -ETIMEDOUT; ++ } ++ if ((res->cmd & ~0x8000) == creq.cmd) ++ return -ETIMEDOUT; ++ /* reply for the previous command most likely arrived */ ++ } ++ ++ if ((res->cmd & ~0x8000) != req->cmd) { ++ pr_err("bce-vhci: Possible desync, cmd reply mismatch req=%x, res=%x\n", req->cmd, res->cmd); ++ return -EIO; ++ } ++ if (res->status == BCE_VHCI_SUCCESS) ++ return 0; ++ return res->status; ++} ++ ++int bce_vhci_command_queue_execute(struct bce_vhci_command_queue *cq, struct bce_vhci_message *req, ++ struct bce_vhci_message *res, unsigned long timeout) ++{ ++ int status; ++ mutex_lock(&cq->mutex); ++ status = __bce_vhci_command_queue_execute(cq, req, res, timeout); ++ mutex_unlock(&cq->mutex); ++ return status; ++} +diff --git a/drivers/staging/apple-bce/vhci/queue.h 
b/drivers/staging/apple-bce/vhci/queue.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/vhci/queue.h +@@ -0,0 +1,76 @@ ++#ifndef BCE_VHCI_QUEUE_H ++#define BCE_VHCI_QUEUE_H ++ ++#include ++#include "../queue.h" ++ ++#define VHCI_EVENT_QUEUE_EL_COUNT 256 ++#define VHCI_EVENT_PENDING_COUNT 32 ++ ++struct bce_vhci; ++struct bce_vhci_event_queue; ++ ++enum bce_vhci_message_status { ++ BCE_VHCI_SUCCESS = 1, ++ BCE_VHCI_ERROR = 2, ++ BCE_VHCI_USB_PIPE_STALL = 3, ++ BCE_VHCI_ABORT = 4, ++ BCE_VHCI_BAD_ARGUMENT = 5, ++ BCE_VHCI_OVERRUN = 6, ++ BCE_VHCI_INTERNAL_ERROR = 7, ++ BCE_VHCI_NO_POWER = 8, ++ BCE_VHCI_UNSUPPORTED = 9 ++}; ++struct bce_vhci_message { ++ u16 cmd; ++ u16 status; // bce_vhci_message_status ++ u32 param1; ++ u64 param2; ++}; ++ ++struct bce_vhci_message_queue { ++ struct bce_queue_cq *cq; ++ struct bce_queue_sq *sq; ++ struct bce_vhci_message *data; ++ dma_addr_t dma_addr; ++}; ++typedef void (*bce_vhci_event_queue_callback)(struct bce_vhci_event_queue *q, struct bce_vhci_message *msg); ++struct bce_vhci_event_queue { ++ struct bce_vhci *vhci; ++ struct bce_queue_sq *sq; ++ struct bce_vhci_message *data; ++ dma_addr_t dma_addr; ++ bce_vhci_event_queue_callback cb; ++ struct completion queue_empty_completion; ++}; ++struct bce_vhci_command_queue_completion { ++ struct bce_vhci_message *result; ++ struct completion completion; ++}; ++struct bce_vhci_command_queue { ++ struct bce_vhci_message_queue *mq; ++ struct bce_vhci_command_queue_completion completion; ++ struct spinlock completion_lock; ++ struct mutex mutex; ++}; ++ ++int bce_vhci_message_queue_create(struct bce_vhci *vhci, struct bce_vhci_message_queue *ret, const char *name); ++void bce_vhci_message_queue_destroy(struct bce_vhci *vhci, struct bce_vhci_message_queue *q); ++void bce_vhci_message_queue_write(struct bce_vhci_message_queue *q, struct bce_vhci_message *req); ++ ++int __bce_vhci_event_queue_create(struct bce_vhci *vhci, struct 
bce_vhci_event_queue *ret, const char *name, ++ bce_sq_completion compl); ++int bce_vhci_event_queue_create(struct bce_vhci *vhci, struct bce_vhci_event_queue *ret, const char *name, ++ bce_vhci_event_queue_callback cb); ++void bce_vhci_event_queue_destroy(struct bce_vhci *vhci, struct bce_vhci_event_queue *q); ++void bce_vhci_event_queue_submit_pending(struct bce_vhci_event_queue *q, size_t count); ++void bce_vhci_event_queue_pause(struct bce_vhci_event_queue *q); ++void bce_vhci_event_queue_resume(struct bce_vhci_event_queue *q); ++ ++void bce_vhci_command_queue_create(struct bce_vhci_command_queue *ret, struct bce_vhci_message_queue *mq); ++void bce_vhci_command_queue_destroy(struct bce_vhci_command_queue *cq); ++int bce_vhci_command_queue_execute(struct bce_vhci_command_queue *cq, struct bce_vhci_message *req, ++ struct bce_vhci_message *res, unsigned long timeout); ++void bce_vhci_command_queue_deliver_completion(struct bce_vhci_command_queue *cq, struct bce_vhci_message *msg); ++ ++#endif //BCE_VHCI_QUEUE_H +diff --git a/drivers/staging/apple-bce/vhci/transfer.c b/drivers/staging/apple-bce/vhci/transfer.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/vhci/transfer.c +@@ -0,0 +1,661 @@ ++#include "transfer.h" ++#include "../queue.h" ++#include "vhci.h" ++#include "../apple_bce.h" ++#include ++ ++static void bce_vhci_transfer_queue_completion(struct bce_queue_sq *sq); ++static void bce_vhci_transfer_queue_giveback(struct bce_vhci_transfer_queue *q); ++static void bce_vhci_transfer_queue_remove_pending(struct bce_vhci_transfer_queue *q); ++ ++static int bce_vhci_urb_init(struct bce_vhci_urb *vurb); ++static int bce_vhci_urb_update(struct bce_vhci_urb *urb, struct bce_vhci_message *msg); ++static int bce_vhci_urb_transfer_completion(struct bce_vhci_urb *urb, struct bce_sq_completion_data *c); ++ ++static void bce_vhci_transfer_queue_reset_w(struct work_struct *work); ++ ++void 
bce_vhci_create_transfer_queue(struct bce_vhci *vhci, struct bce_vhci_transfer_queue *q, ++ struct usb_host_endpoint *endp, bce_vhci_device_t dev_addr, enum dma_data_direction dir) ++{ ++ char name[0x21]; ++ INIT_LIST_HEAD(&q->evq); ++ INIT_LIST_HEAD(&q->giveback_urb_list); ++ spin_lock_init(&q->urb_lock); ++ mutex_init(&q->pause_lock); ++ q->vhci = vhci; ++ q->endp = endp; ++ q->dev_addr = dev_addr; ++ q->endp_addr = (u8) (endp->desc.bEndpointAddress & 0x8F); ++ q->state = BCE_VHCI_ENDPOINT_ACTIVE; ++ q->active = true; ++ q->stalled = false; ++ q->max_active_requests = 1; ++ if (usb_endpoint_type(&endp->desc) == USB_ENDPOINT_XFER_BULK) ++ q->max_active_requests = BCE_VHCI_BULK_MAX_ACTIVE_URBS; ++ q->remaining_active_requests = q->max_active_requests; ++ q->cq = bce_create_cq(vhci->dev, 0x100); ++ INIT_WORK(&q->w_reset, bce_vhci_transfer_queue_reset_w); ++ q->sq_in = NULL; ++ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { ++ snprintf(name, sizeof(name), "VHC1-%i-%02x", dev_addr, 0x80 | usb_endpoint_num(&endp->desc)); ++ q->sq_in = bce_create_sq(vhci->dev, q->cq, name, 0x100, DMA_FROM_DEVICE, ++ bce_vhci_transfer_queue_completion, q); ++ } ++ q->sq_out = NULL; ++ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) { ++ snprintf(name, sizeof(name), "VHC1-%i-%02x", dev_addr, usb_endpoint_num(&endp->desc)); ++ q->sq_out = bce_create_sq(vhci->dev, q->cq, name, 0x100, DMA_TO_DEVICE, ++ bce_vhci_transfer_queue_completion, q); ++ } ++} ++ ++void bce_vhci_destroy_transfer_queue(struct bce_vhci *vhci, struct bce_vhci_transfer_queue *q) ++{ ++ bce_vhci_transfer_queue_giveback(q); ++ bce_vhci_transfer_queue_remove_pending(q); ++ if (q->sq_in) ++ bce_destroy_sq(vhci->dev, q->sq_in); ++ if (q->sq_out) ++ bce_destroy_sq(vhci->dev, q->sq_out); ++ bce_destroy_cq(vhci->dev, q->cq); ++} ++ ++static inline bool bce_vhci_transfer_queue_can_init_urb(struct bce_vhci_transfer_queue *q) ++{ ++ return q->remaining_active_requests > 0; ++} ++ ++static void 
bce_vhci_transfer_queue_defer_event(struct bce_vhci_transfer_queue *q, struct bce_vhci_message *msg) ++{ ++ struct bce_vhci_list_message *lm; ++ lm = kmalloc(sizeof(struct bce_vhci_list_message), GFP_KERNEL); ++ INIT_LIST_HEAD(&lm->list); ++ lm->msg = *msg; ++ list_add_tail(&lm->list, &q->evq); ++} ++ ++static void bce_vhci_transfer_queue_giveback(struct bce_vhci_transfer_queue *q) ++{ ++ unsigned long flags; ++ struct urb *urb; ++ spin_lock_irqsave(&q->urb_lock, flags); ++ while (!list_empty(&q->giveback_urb_list)) { ++ urb = list_first_entry(&q->giveback_urb_list, struct urb, urb_list); ++ list_del(&urb->urb_list); ++ ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ usb_hcd_giveback_urb(q->vhci->hcd, urb, urb->status); ++ spin_lock_irqsave(&q->urb_lock, flags); ++ } ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++} ++ ++static void bce_vhci_transfer_queue_init_pending_urbs(struct bce_vhci_transfer_queue *q); ++ ++static void bce_vhci_transfer_queue_deliver_pending(struct bce_vhci_transfer_queue *q) ++{ ++ struct urb *urb; ++ struct bce_vhci_list_message *lm; ++ ++ while (!list_empty(&q->endp->urb_list) && !list_empty(&q->evq)) { ++ urb = list_first_entry(&q->endp->urb_list, struct urb, urb_list); ++ ++ lm = list_first_entry(&q->evq, struct bce_vhci_list_message, list); ++ if (bce_vhci_urb_update(urb->hcpriv, &lm->msg) == -EAGAIN) ++ break; ++ list_del(&lm->list); ++ kfree(lm); ++ } ++ ++ /* some of the URBs could have been completed, so initialize more URBs if possible */ ++ bce_vhci_transfer_queue_init_pending_urbs(q); ++} ++ ++static void bce_vhci_transfer_queue_remove_pending(struct bce_vhci_transfer_queue *q) ++{ ++ unsigned long flags; ++ struct bce_vhci_list_message *lm; ++ spin_lock_irqsave(&q->urb_lock, flags); ++ while (!list_empty(&q->evq)) { ++ lm = list_first_entry(&q->evq, struct bce_vhci_list_message, list); ++ list_del(&lm->list); ++ kfree(lm); ++ } ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++} ++ ++void 
bce_vhci_transfer_queue_event(struct bce_vhci_transfer_queue *q, struct bce_vhci_message *msg) ++{ ++ unsigned long flags; ++ struct bce_vhci_urb *turb; ++ struct urb *urb; ++ spin_lock_irqsave(&q->urb_lock, flags); ++ bce_vhci_transfer_queue_deliver_pending(q); ++ ++ if (msg->cmd == BCE_VHCI_CMD_TRANSFER_REQUEST && ++ (!list_empty(&q->evq) || list_empty(&q->endp->urb_list))) { ++ bce_vhci_transfer_queue_defer_event(q, msg); ++ goto complete; ++ } ++ if (list_empty(&q->endp->urb_list)) { ++ pr_err("bce-vhci: [%02x] Unexpected transfer queue event\n", q->endp_addr); ++ goto complete; ++ } ++ urb = list_first_entry(&q->endp->urb_list, struct urb, urb_list); ++ turb = urb->hcpriv; ++ if (bce_vhci_urb_update(turb, msg) == -EAGAIN) { ++ bce_vhci_transfer_queue_defer_event(q, msg); ++ } else { ++ bce_vhci_transfer_queue_init_pending_urbs(q); ++ } ++ ++complete: ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ bce_vhci_transfer_queue_giveback(q); ++} ++ ++static void bce_vhci_transfer_queue_completion(struct bce_queue_sq *sq) ++{ ++ unsigned long flags; ++ struct bce_sq_completion_data *c; ++ struct urb *urb; ++ struct bce_vhci_transfer_queue *q = sq->userdata; ++ spin_lock_irqsave(&q->urb_lock, flags); ++ while ((c = bce_next_completion(sq))) { ++ if (c->status == BCE_COMPLETION_ABORTED) { /* We flushed the queue */ ++ pr_debug("bce-vhci: [%02x] Got an abort completion\n", q->endp_addr); ++ bce_notify_submission_complete(sq); ++ continue; ++ } ++ if (list_empty(&q->endp->urb_list)) { ++ pr_err("bce-vhci: [%02x] Got a completion while no requests are pending\n", q->endp_addr); ++ continue; ++ } ++ pr_debug("bce-vhci: [%02x] Got a transfer queue completion\n", q->endp_addr); ++ urb = list_first_entry(&q->endp->urb_list, struct urb, urb_list); ++ bce_vhci_urb_transfer_completion(urb->hcpriv, c); ++ bce_notify_submission_complete(sq); ++ } ++ bce_vhci_transfer_queue_deliver_pending(q); ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ bce_vhci_transfer_queue_giveback(q); 
++} ++ ++int bce_vhci_transfer_queue_do_pause(struct bce_vhci_transfer_queue *q) ++{ ++ unsigned long flags; ++ int status; ++ u8 endp_addr = (u8) (q->endp->desc.bEndpointAddress & 0x8F); ++ spin_lock_irqsave(&q->urb_lock, flags); ++ q->active = false; ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ if (q->sq_out) { ++ pr_err("bce-vhci: Not implemented: wait for pending output requests\n"); ++ } ++ bce_vhci_transfer_queue_remove_pending(q); ++ if ((status = bce_vhci_cmd_endpoint_set_state( ++ &q->vhci->cq, q->dev_addr, endp_addr, BCE_VHCI_ENDPOINT_PAUSED, &q->state))) ++ return status; ++ if (q->state != BCE_VHCI_ENDPOINT_PAUSED) ++ return -EINVAL; ++ if (q->sq_in) ++ bce_cmd_flush_memory_queue(q->vhci->dev->cmd_cmdq, (u16) q->sq_in->qid); ++ if (q->sq_out) ++ bce_cmd_flush_memory_queue(q->vhci->dev->cmd_cmdq, (u16) q->sq_out->qid); ++ return 0; ++} ++ ++static void bce_vhci_urb_resume(struct bce_vhci_urb *urb); ++ ++int bce_vhci_transfer_queue_do_resume(struct bce_vhci_transfer_queue *q) ++{ ++ unsigned long flags; ++ int status; ++ struct urb *urb, *urbt; ++ struct bce_vhci_urb *vurb; ++ u8 endp_addr = (u8) (q->endp->desc.bEndpointAddress & 0x8F); ++ if ((status = bce_vhci_cmd_endpoint_set_state( ++ &q->vhci->cq, q->dev_addr, endp_addr, BCE_VHCI_ENDPOINT_ACTIVE, &q->state))) ++ return status; ++ if (q->state != BCE_VHCI_ENDPOINT_ACTIVE) ++ return -EINVAL; ++ spin_lock_irqsave(&q->urb_lock, flags); ++ q->active = true; ++ list_for_each_entry_safe(urb, urbt, &q->endp->urb_list, urb_list) { ++ vurb = urb->hcpriv; ++ if (vurb->state == BCE_VHCI_URB_INIT_PENDING) { ++ if (!bce_vhci_transfer_queue_can_init_urb(q)) ++ break; ++ bce_vhci_urb_init(vurb); ++ } else { ++ bce_vhci_urb_resume(vurb); ++ } ++ } ++ bce_vhci_transfer_queue_deliver_pending(q); ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ return 0; ++} ++ ++int bce_vhci_transfer_queue_pause(struct bce_vhci_transfer_queue *q, enum bce_vhci_pause_source src) ++{ ++ int ret = 0; ++ mutex_lock(&q->pause_lock); 
++ if ((q->paused_by & src) != src) { ++ if (!q->paused_by) ++ ret = bce_vhci_transfer_queue_do_pause(q); ++ if (!ret) ++ q->paused_by |= src; ++ } ++ mutex_unlock(&q->pause_lock); ++ return ret; ++} ++ ++int bce_vhci_transfer_queue_resume(struct bce_vhci_transfer_queue *q, enum bce_vhci_pause_source src) ++{ ++ int ret = 0; ++ mutex_lock(&q->pause_lock); ++ if (q->paused_by & src) { ++ if (!(q->paused_by & ~src)) ++ ret = bce_vhci_transfer_queue_do_resume(q); ++ if (!ret) ++ q->paused_by &= ~src; ++ } ++ mutex_unlock(&q->pause_lock); ++ return ret; ++} ++ ++static void bce_vhci_transfer_queue_reset_w(struct work_struct *work) ++{ ++ unsigned long flags; ++ struct bce_vhci_transfer_queue *q = container_of(work, struct bce_vhci_transfer_queue, w_reset); ++ ++ mutex_lock(&q->pause_lock); ++ spin_lock_irqsave(&q->urb_lock, flags); ++ if (!q->stalled) { ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ mutex_unlock(&q->pause_lock); ++ return; ++ } ++ q->active = false; ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ q->paused_by |= BCE_VHCI_PAUSE_INTERNAL_WQ; ++ bce_vhci_transfer_queue_remove_pending(q); ++ if (q->sq_in) ++ bce_cmd_flush_memory_queue(q->vhci->dev->cmd_cmdq, (u16) q->sq_in->qid); ++ if (q->sq_out) ++ bce_cmd_flush_memory_queue(q->vhci->dev->cmd_cmdq, (u16) q->sq_out->qid); ++ bce_vhci_cmd_endpoint_reset(&q->vhci->cq, q->dev_addr, (u8) (q->endp->desc.bEndpointAddress & 0x8F)); ++ spin_lock_irqsave(&q->urb_lock, flags); ++ q->stalled = false; ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ mutex_unlock(&q->pause_lock); ++ bce_vhci_transfer_queue_resume(q, BCE_VHCI_PAUSE_INTERNAL_WQ); ++} ++ ++void bce_vhci_transfer_queue_request_reset(struct bce_vhci_transfer_queue *q) ++{ ++ queue_work(q->vhci->tq_state_wq, &q->w_reset); ++} ++ ++static void bce_vhci_transfer_queue_init_pending_urbs(struct bce_vhci_transfer_queue *q) ++{ ++ struct urb *urb, *urbt; ++ struct bce_vhci_urb *vurb; ++ list_for_each_entry_safe(urb, urbt, &q->endp->urb_list, urb_list) { 
++ vurb = urb->hcpriv; ++ if (!bce_vhci_transfer_queue_can_init_urb(q)) ++ break; ++ if (vurb->state == BCE_VHCI_URB_INIT_PENDING) ++ bce_vhci_urb_init(vurb); ++ } ++} ++ ++ ++ ++static int bce_vhci_urb_data_start(struct bce_vhci_urb *urb, unsigned long *timeout); ++ ++int bce_vhci_urb_create(struct bce_vhci_transfer_queue *q, struct urb *urb) ++{ ++ unsigned long flags; ++ int status = 0; ++ struct bce_vhci_urb *vurb; ++ vurb = kzalloc(sizeof(struct bce_vhci_urb), GFP_KERNEL); ++ urb->hcpriv = vurb; ++ ++ vurb->q = q; ++ vurb->urb = urb; ++ vurb->dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; ++ vurb->is_control = (usb_endpoint_num(&urb->ep->desc) == 0); ++ ++ spin_lock_irqsave(&q->urb_lock, flags); ++ status = usb_hcd_link_urb_to_ep(q->vhci->hcd, urb); ++ if (status) { ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ urb->hcpriv = NULL; ++ kfree(vurb); ++ return status; ++ } ++ ++ if (q->active) { ++ if (bce_vhci_transfer_queue_can_init_urb(vurb->q)) ++ status = bce_vhci_urb_init(vurb); ++ else ++ vurb->state = BCE_VHCI_URB_INIT_PENDING; ++ } else { ++ if (q->stalled) ++ bce_vhci_transfer_queue_request_reset(q); ++ vurb->state = BCE_VHCI_URB_INIT_PENDING; ++ } ++ if (status) { ++ usb_hcd_unlink_urb_from_ep(q->vhci->hcd, urb); ++ urb->hcpriv = NULL; ++ kfree(vurb); ++ } else { ++ bce_vhci_transfer_queue_deliver_pending(q); ++ } ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ pr_debug("bce-vhci: [%02x] URB enqueued (dir = %s, size = %i)\n", q->endp_addr, ++ usb_urb_dir_in(urb) ? 
"IN" : "OUT", urb->transfer_buffer_length); ++ return status; ++} ++ ++static int bce_vhci_urb_init(struct bce_vhci_urb *vurb) ++{ ++ int status = 0; ++ ++ if (vurb->q->remaining_active_requests == 0) { ++ pr_err("bce-vhci: cannot init request (remaining_active_requests = 0)\n"); ++ return -EINVAL; ++ } ++ ++ if (vurb->is_control) { ++ vurb->state = BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_REQUEST; ++ } else { ++ status = bce_vhci_urb_data_start(vurb, NULL); ++ } ++ ++ if (!status) { ++ --vurb->q->remaining_active_requests; ++ } ++ return status; ++} ++ ++static void bce_vhci_urb_complete(struct bce_vhci_urb *urb, int status) ++{ ++ struct bce_vhci_transfer_queue *q = urb->q; ++ struct bce_vhci *vhci = q->vhci; ++ struct urb *real_urb = urb->urb; ++ pr_debug("bce-vhci: [%02x] URB complete %i\n", q->endp_addr, status); ++ usb_hcd_unlink_urb_from_ep(vhci->hcd, real_urb); ++ real_urb->hcpriv = NULL; ++ real_urb->status = status; ++ if (urb->state != BCE_VHCI_URB_INIT_PENDING) ++ ++urb->q->remaining_active_requests; ++ kfree(urb); ++ list_add_tail(&real_urb->urb_list, &q->giveback_urb_list); ++} ++ ++int bce_vhci_urb_request_cancel(struct bce_vhci_transfer_queue *q, struct urb *urb, int status) ++{ ++ struct bce_vhci_urb *vurb; ++ unsigned long flags; ++ int ret; ++ ++ spin_lock_irqsave(&q->urb_lock, flags); ++ if ((ret = usb_hcd_check_unlink_urb(q->vhci->hcd, urb, status))) { ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ return ret; ++ } ++ ++ vurb = urb->hcpriv; ++ /* If the URB wasn't posted to the device yet, we can still remove it on the host without pausing the queue. 
*/ ++ if (vurb->state != BCE_VHCI_URB_INIT_PENDING) { ++ pr_debug("bce-vhci: [%02x] Cancelling URB\n", q->endp_addr); ++ ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ bce_vhci_transfer_queue_pause(q, BCE_VHCI_PAUSE_INTERNAL_WQ); ++ spin_lock_irqsave(&q->urb_lock, flags); ++ ++ ++q->remaining_active_requests; ++ } ++ ++ usb_hcd_unlink_urb_from_ep(q->vhci->hcd, urb); ++ ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ ++ usb_hcd_giveback_urb(q->vhci->hcd, urb, status); ++ ++ if (vurb->state != BCE_VHCI_URB_INIT_PENDING) ++ bce_vhci_transfer_queue_resume(q, BCE_VHCI_PAUSE_INTERNAL_WQ); ++ ++ kfree(vurb); ++ ++ return 0; ++} ++ ++static int bce_vhci_urb_data_transfer_in(struct bce_vhci_urb *urb, unsigned long *timeout) ++{ ++ struct bce_vhci_message msg; ++ struct bce_qe_submission *s; ++ u32 tr_len; ++ int reservation1, reservation2 = -EFAULT; ++ ++ pr_debug("bce-vhci: [%02x] DMA from device %llx %x\n", urb->q->endp_addr, ++ (u64) urb->urb->transfer_dma, urb->urb->transfer_buffer_length); ++ ++ /* Reserve both a message and a submission, so we don't run into issues later. 
*/ ++ reservation1 = bce_reserve_submission(urb->q->vhci->msg_asynchronous.sq, timeout); ++ if (!reservation1) ++ reservation2 = bce_reserve_submission(urb->q->sq_in, timeout); ++ if (reservation1 || reservation2) { ++ pr_err("bce-vhci: Failed to reserve a submission for URB data transfer\n"); ++ if (!reservation1) ++ bce_cancel_submission_reservation(urb->q->vhci->msg_asynchronous.sq); ++ return -ENOMEM; ++ } ++ ++ urb->send_offset = urb->receive_offset; ++ ++ tr_len = urb->urb->transfer_buffer_length - urb->send_offset; ++ ++ spin_lock(&urb->q->vhci->msg_asynchronous_lock); ++ msg.cmd = BCE_VHCI_CMD_TRANSFER_REQUEST; ++ msg.status = 0; ++ msg.param1 = ((urb->urb->ep->desc.bEndpointAddress & 0x8Fu) << 8) | urb->q->dev_addr; ++ msg.param2 = tr_len; ++ bce_vhci_message_queue_write(&urb->q->vhci->msg_asynchronous, &msg); ++ spin_unlock(&urb->q->vhci->msg_asynchronous_lock); ++ ++ s = bce_next_submission(urb->q->sq_in); ++ bce_set_submission_single(s, urb->urb->transfer_dma + urb->send_offset, tr_len); ++ bce_submit_to_device(urb->q->sq_in); ++ ++ urb->state = BCE_VHCI_URB_WAITING_FOR_COMPLETION; ++ return 0; ++} ++ ++static int bce_vhci_urb_data_start(struct bce_vhci_urb *urb, unsigned long *timeout) ++{ ++ if (urb->dir == DMA_TO_DEVICE) { ++ if (urb->urb->transfer_buffer_length > 0) ++ urb->state = BCE_VHCI_URB_WAITING_FOR_TRANSFER_REQUEST; ++ else ++ urb->state = BCE_VHCI_URB_DATA_TRANSFER_COMPLETE; ++ return 0; ++ } else { ++ return bce_vhci_urb_data_transfer_in(urb, timeout); ++ } ++} ++ ++static int bce_vhci_urb_send_out_data(struct bce_vhci_urb *urb, dma_addr_t addr, size_t size) ++{ ++ struct bce_qe_submission *s; ++ unsigned long timeout = 0; ++ if (bce_reserve_submission(urb->q->sq_out, &timeout)) { ++ pr_err("bce-vhci: Failed to reserve a submission for URB data transfer\n"); ++ return -EPIPE; ++ } ++ ++ pr_debug("bce-vhci: [%02x] DMA to device %llx %lx\n", urb->q->endp_addr, (u64) addr, size); ++ ++ s = bce_next_submission(urb->q->sq_out); ++ 
bce_set_submission_single(s, addr, size); ++ bce_submit_to_device(urb->q->sq_out); ++ return 0; ++} ++ ++static int bce_vhci_urb_data_update(struct bce_vhci_urb *urb, struct bce_vhci_message *msg) ++{ ++ u32 tr_len; ++ int status; ++ if (urb->state == BCE_VHCI_URB_WAITING_FOR_TRANSFER_REQUEST) { ++ if (msg->cmd == BCE_VHCI_CMD_TRANSFER_REQUEST) { ++ tr_len = min(urb->urb->transfer_buffer_length - urb->send_offset, (u32) msg->param2); ++ if ((status = bce_vhci_urb_send_out_data(urb, urb->urb->transfer_dma + urb->send_offset, tr_len))) ++ return status; ++ urb->send_offset += tr_len; ++ urb->state = BCE_VHCI_URB_WAITING_FOR_COMPLETION; ++ return 0; ++ } ++ } ++ ++ /* 0x1000 in out queues aren't really unexpected */ ++ if (msg->cmd == BCE_VHCI_CMD_TRANSFER_REQUEST && urb->q->sq_out != NULL) ++ return -EAGAIN; ++ pr_err("bce-vhci: [%02x] %s URB unexpected message (state = %x, msg: %x %x %x %llx)\n", ++ urb->q->endp_addr, (urb->is_control ? "Control (data update)" : "Data"), urb->state, ++ msg->cmd, msg->status, msg->param1, msg->param2); ++ return -EAGAIN; ++} ++ ++static int bce_vhci_urb_data_transfer_completion(struct bce_vhci_urb *urb, struct bce_sq_completion_data *c) ++{ ++ if (urb->state == BCE_VHCI_URB_WAITING_FOR_COMPLETION) { ++ urb->receive_offset += c->data_size; ++ if (urb->dir == DMA_FROM_DEVICE || urb->receive_offset >= urb->urb->transfer_buffer_length) { ++ urb->urb->actual_length = (u32) urb->receive_offset; ++ urb->state = BCE_VHCI_URB_DATA_TRANSFER_COMPLETE; ++ if (!urb->is_control) { ++ bce_vhci_urb_complete(urb, 0); ++ return -ENOENT; ++ } ++ } ++ } else { ++ pr_err("bce-vhci: [%02x] Data URB unexpected completion\n", urb->q->endp_addr); ++ } ++ return 0; ++} ++ ++ ++static int bce_vhci_urb_control_check_status(struct bce_vhci_urb *urb) ++{ ++ struct bce_vhci_transfer_queue *q = urb->q; ++ if (urb->received_status == 0) ++ return 0; ++ if (urb->state == BCE_VHCI_URB_DATA_TRANSFER_COMPLETE || ++ (urb->received_status != BCE_VHCI_SUCCESS && urb->state 
!= BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_REQUEST && ++ urb->state != BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_COMPLETION)) { ++ urb->state = BCE_VHCI_URB_CONTROL_COMPLETE; ++ if (urb->received_status != BCE_VHCI_SUCCESS) { ++ pr_err("bce-vhci: [%02x] URB failed: %x\n", urb->q->endp_addr, urb->received_status); ++ urb->q->active = false; ++ urb->q->stalled = true; ++ bce_vhci_urb_complete(urb, -EPIPE); ++ if (!list_empty(&q->endp->urb_list)) ++ bce_vhci_transfer_queue_request_reset(q); ++ return -ENOENT; ++ } ++ bce_vhci_urb_complete(urb, 0); ++ return -ENOENT; ++ } ++ return 0; ++} ++ ++static int bce_vhci_urb_control_update(struct bce_vhci_urb *urb, struct bce_vhci_message *msg) ++{ ++ int status; ++ if (msg->cmd == BCE_VHCI_CMD_CONTROL_TRANSFER_STATUS) { ++ urb->received_status = msg->status; ++ return bce_vhci_urb_control_check_status(urb); ++ } ++ ++ if (urb->state == BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_REQUEST) { ++ if (msg->cmd == BCE_VHCI_CMD_TRANSFER_REQUEST) { ++ if (bce_vhci_urb_send_out_data(urb, urb->urb->setup_dma, sizeof(struct usb_ctrlrequest))) { ++ pr_err("bce-vhci: [%02x] Failed to start URB setup transfer\n", urb->q->endp_addr); ++ return 0; /* TODO: fail the URB? 
*/ ++ } ++ urb->state = BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_COMPLETION; ++ pr_debug("bce-vhci: [%02x] Sent setup %llx\n", urb->q->endp_addr, urb->urb->setup_dma); ++ return 0; ++ } ++ } else if (urb->state == BCE_VHCI_URB_WAITING_FOR_TRANSFER_REQUEST || ++ urb->state == BCE_VHCI_URB_WAITING_FOR_COMPLETION) { ++ if ((status = bce_vhci_urb_data_update(urb, msg))) ++ return status; ++ return bce_vhci_urb_control_check_status(urb); ++ } ++ ++ /* 0x1000 in out queues aren't really unexpected */ ++ if (msg->cmd == BCE_VHCI_CMD_TRANSFER_REQUEST && urb->q->sq_out != NULL) ++ return -EAGAIN; ++ pr_err("bce-vhci: [%02x] Control URB unexpected message (state = %x, msg: %x %x %x %llx)\n", urb->q->endp_addr, ++ urb->state, msg->cmd, msg->status, msg->param1, msg->param2); ++ return -EAGAIN; ++} ++ ++static int bce_vhci_urb_control_transfer_completion(struct bce_vhci_urb *urb, struct bce_sq_completion_data *c) ++{ ++ int status; ++ unsigned long timeout; ++ ++ if (urb->state == BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_COMPLETION) { ++ if (c->data_size != sizeof(struct usb_ctrlrequest)) ++ pr_err("bce-vhci: [%02x] transfer complete data size mistmatch for usb_ctrlrequest (%llx instead of %lx)\n", ++ urb->q->endp_addr, c->data_size, sizeof(struct usb_ctrlrequest)); ++ ++ timeout = 1000; ++ status = bce_vhci_urb_data_start(urb, &timeout); ++ if (status) { ++ bce_vhci_urb_complete(urb, status); ++ return -ENOENT; ++ } ++ return 0; ++ } else if (urb->state == BCE_VHCI_URB_WAITING_FOR_TRANSFER_REQUEST || ++ urb->state == BCE_VHCI_URB_WAITING_FOR_COMPLETION) { ++ if ((status = bce_vhci_urb_data_transfer_completion(urb, c))) ++ return status; ++ return bce_vhci_urb_control_check_status(urb); ++ } else { ++ pr_err("bce-vhci: [%02x] Control URB unexpected completion (state = %x)\n", urb->q->endp_addr, urb->state); ++ } ++ return 0; ++} ++ ++static int bce_vhci_urb_update(struct bce_vhci_urb *urb, struct bce_vhci_message *msg) ++{ ++ if (urb->state == BCE_VHCI_URB_INIT_PENDING) ++ return 
-EAGAIN; ++ if (urb->is_control) ++ return bce_vhci_urb_control_update(urb, msg); ++ else ++ return bce_vhci_urb_data_update(urb, msg); ++} ++ ++static int bce_vhci_urb_transfer_completion(struct bce_vhci_urb *urb, struct bce_sq_completion_data *c) ++{ ++ if (urb->is_control) ++ return bce_vhci_urb_control_transfer_completion(urb, c); ++ else ++ return bce_vhci_urb_data_transfer_completion(urb, c); ++} ++ ++static void bce_vhci_urb_resume(struct bce_vhci_urb *urb) ++{ ++ int status = 0; ++ if (urb->state == BCE_VHCI_URB_WAITING_FOR_COMPLETION) { ++ status = bce_vhci_urb_data_transfer_in(urb, NULL); ++ } ++ if (status) ++ bce_vhci_urb_complete(urb, status); ++} +diff --git a/drivers/staging/apple-bce/vhci/transfer.h b/drivers/staging/apple-bce/vhci/transfer.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/vhci/transfer.h +@@ -0,0 +1,73 @@ ++#ifndef BCEDRIVER_TRANSFER_H ++#define BCEDRIVER_TRANSFER_H ++ ++#include ++#include "queue.h" ++#include "command.h" ++#include "../queue.h" ++ ++struct bce_vhci_list_message { ++ struct list_head list; ++ struct bce_vhci_message msg; ++}; ++enum bce_vhci_pause_source { ++ BCE_VHCI_PAUSE_INTERNAL_WQ = 1, ++ BCE_VHCI_PAUSE_FIRMWARE = 2, ++ BCE_VHCI_PAUSE_SUSPEND = 4, ++ BCE_VHCI_PAUSE_SHUTDOWN = 8 ++}; ++struct bce_vhci_transfer_queue { ++ struct bce_vhci *vhci; ++ struct usb_host_endpoint *endp; ++ enum bce_vhci_endpoint_state state; ++ u32 max_active_requests, remaining_active_requests; ++ bool active, stalled; ++ u32 paused_by; ++ bce_vhci_device_t dev_addr; ++ u8 endp_addr; ++ struct bce_queue_cq *cq; ++ struct bce_queue_sq *sq_in; ++ struct bce_queue_sq *sq_out; ++ struct list_head evq; ++ struct spinlock urb_lock; ++ struct mutex pause_lock; ++ struct list_head giveback_urb_list; ++ ++ struct work_struct w_reset; ++}; ++enum bce_vhci_urb_state { ++ BCE_VHCI_URB_INIT_PENDING, ++ ++ BCE_VHCI_URB_WAITING_FOR_TRANSFER_REQUEST, ++ BCE_VHCI_URB_WAITING_FOR_COMPLETION, ++ 
BCE_VHCI_URB_DATA_TRANSFER_COMPLETE, ++ ++ BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_REQUEST, ++ BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_COMPLETION, ++ BCE_VHCI_URB_CONTROL_COMPLETE ++}; ++struct bce_vhci_urb { ++ struct urb *urb; ++ struct bce_vhci_transfer_queue *q; ++ enum dma_data_direction dir; ++ bool is_control; ++ enum bce_vhci_urb_state state; ++ int received_status; ++ u32 send_offset; ++ u32 receive_offset; ++}; ++ ++void bce_vhci_create_transfer_queue(struct bce_vhci *vhci, struct bce_vhci_transfer_queue *q, ++ struct usb_host_endpoint *endp, bce_vhci_device_t dev_addr, enum dma_data_direction dir); ++void bce_vhci_destroy_transfer_queue(struct bce_vhci *vhci, struct bce_vhci_transfer_queue *q); ++void bce_vhci_transfer_queue_event(struct bce_vhci_transfer_queue *q, struct bce_vhci_message *msg); ++int bce_vhci_transfer_queue_do_pause(struct bce_vhci_transfer_queue *q); ++int bce_vhci_transfer_queue_do_resume(struct bce_vhci_transfer_queue *q); ++int bce_vhci_transfer_queue_pause(struct bce_vhci_transfer_queue *q, enum bce_vhci_pause_source src); ++int bce_vhci_transfer_queue_resume(struct bce_vhci_transfer_queue *q, enum bce_vhci_pause_source src); ++void bce_vhci_transfer_queue_request_reset(struct bce_vhci_transfer_queue *q); ++ ++int bce_vhci_urb_create(struct bce_vhci_transfer_queue *q, struct urb *urb); ++int bce_vhci_urb_request_cancel(struct bce_vhci_transfer_queue *q, struct urb *urb, int status); ++ ++#endif //BCEDRIVER_TRANSFER_H +diff --git a/drivers/staging/apple-bce/vhci/vhci.c b/drivers/staging/apple-bce/vhci/vhci.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/vhci/vhci.c +@@ -0,0 +1,763 @@ ++#include "vhci.h" ++#include "../apple_bce.h" ++#include "command.h" ++#include ++#include ++#include ++#include ++ ++static dev_t bce_vhci_chrdev; ++static struct class *bce_vhci_class; ++static const struct hc_driver bce_vhci_driver; ++static u16 bce_vhci_port_mask = U16_MAX; ++ ++static int 
bce_vhci_create_event_queues(struct bce_vhci *vhci); ++static void bce_vhci_destroy_event_queues(struct bce_vhci *vhci); ++static int bce_vhci_create_message_queues(struct bce_vhci *vhci); ++static void bce_vhci_destroy_message_queues(struct bce_vhci *vhci); ++static void bce_vhci_handle_firmware_events_w(struct work_struct *ws); ++static void bce_vhci_firmware_event_completion(struct bce_queue_sq *sq); ++ ++int bce_vhci_create(struct apple_bce_device *dev, struct bce_vhci *vhci) ++{ ++ int status; ++ ++ spin_lock_init(&vhci->hcd_spinlock); ++ ++ vhci->dev = dev; ++ ++ vhci->vdevt = bce_vhci_chrdev; ++ vhci->vdev = device_create(bce_vhci_class, dev->dev, vhci->vdevt, NULL, "bce-vhci"); ++ if (IS_ERR_OR_NULL(vhci->vdev)) { ++ status = PTR_ERR(vhci->vdev); ++ goto fail_dev; ++ } ++ ++ if ((status = bce_vhci_create_message_queues(vhci))) ++ goto fail_mq; ++ if ((status = bce_vhci_create_event_queues(vhci))) ++ goto fail_eq; ++ ++ vhci->tq_state_wq = alloc_ordered_workqueue("bce-vhci-tq-state", 0); ++ INIT_WORK(&vhci->w_fw_events, bce_vhci_handle_firmware_events_w); ++ ++ vhci->hcd = usb_create_hcd(&bce_vhci_driver, vhci->vdev, "bce-vhci"); ++ if (!vhci->hcd) { ++ status = -ENOMEM; ++ goto fail_hcd; ++ } ++ vhci->hcd->self.sysdev = &dev->pci->dev; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) ++ vhci->hcd->self.uses_dma = 1; ++#endif ++ *((struct bce_vhci **) vhci->hcd->hcd_priv) = vhci; ++ vhci->hcd->speed = HCD_USB2; ++ ++ if ((status = usb_add_hcd(vhci->hcd, 0, 0))) ++ goto fail_hcd; ++ ++ return 0; ++ ++fail_hcd: ++ bce_vhci_destroy_event_queues(vhci); ++fail_eq: ++ bce_vhci_destroy_message_queues(vhci); ++fail_mq: ++ device_destroy(bce_vhci_class, vhci->vdevt); ++fail_dev: ++ if (!status) ++ status = -EINVAL; ++ return status; ++} ++ ++void bce_vhci_destroy(struct bce_vhci *vhci) ++{ ++ usb_remove_hcd(vhci->hcd); ++ bce_vhci_destroy_event_queues(vhci); ++ bce_vhci_destroy_message_queues(vhci); ++ device_destroy(bce_vhci_class, vhci->vdevt); ++} ++ ++struct 
bce_vhci *bce_vhci_from_hcd(struct usb_hcd *hcd) ++{ ++ return *((struct bce_vhci **) hcd->hcd_priv); ++} ++ ++int bce_vhci_start(struct usb_hcd *hcd) ++{ ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ int status; ++ u16 port_mask = 0; ++ bce_vhci_port_t port_no = 0; ++ if ((status = bce_vhci_cmd_controller_enable(&vhci->cq, 1, &port_mask))) ++ return status; ++ vhci->port_mask = port_mask; ++ vhci->port_power_mask = 0; ++ if ((status = bce_vhci_cmd_controller_start(&vhci->cq))) ++ return status; ++ port_mask = vhci->port_mask; ++ while (port_mask) { ++ port_no += 1; ++ port_mask >>= 1; ++ } ++ vhci->port_count = port_no; ++ return 0; ++} ++ ++void bce_vhci_stop(struct usb_hcd *hcd) ++{ ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ bce_vhci_cmd_controller_disable(&vhci->cq); ++} ++ ++static int bce_vhci_hub_status_data(struct usb_hcd *hcd, char *buf) ++{ ++ return 0; ++} ++ ++static int bce_vhci_reset_device(struct bce_vhci *vhci, int index, u16 timeout); ++ ++static int bce_vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) ++{ ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ int status; ++ struct usb_hub_descriptor *hd; ++ struct usb_hub_status *hs; ++ struct usb_port_status *ps; ++ u32 port_status; ++ // pr_info("bce-vhci: bce_vhci_hub_control %x %i %i [bufl=%i]\n", typeReq, wValue, wIndex, wLength); ++ if (typeReq == GetHubDescriptor && wLength >= sizeof(struct usb_hub_descriptor)) { ++ hd = (struct usb_hub_descriptor *) buf; ++ memset(hd, 0, sizeof(*hd)); ++ hd->bDescLength = sizeof(struct usb_hub_descriptor); ++ hd->bDescriptorType = USB_DT_HUB; ++ hd->bNbrPorts = (u8) vhci->port_count; ++ hd->wHubCharacteristics = HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_INDV_PORT_OCPM; ++ hd->bPwrOn2PwrGood = 0; ++ hd->bHubContrCurrent = 0; ++ return 0; ++ } else if (typeReq == GetHubStatus && wLength >= sizeof(struct usb_hub_status)) { ++ hs = (struct usb_hub_status *) buf; ++ memset(hs, 0, sizeof(*hs)); ++ 
hs->wHubStatus = 0; ++ hs->wHubChange = 0; ++ return 0; ++ } else if (typeReq == GetPortStatus && wLength >= 4 /* usb 2.0 */) { ++ ps = (struct usb_port_status *) buf; ++ ps->wPortStatus = 0; ++ ps->wPortChange = 0; ++ ++ if (vhci->port_power_mask & BIT(wIndex)) ++ ps->wPortStatus |= USB_PORT_STAT_POWER; ++ ++ if (!(bce_vhci_port_mask & BIT(wIndex))) ++ return 0; ++ ++ if ((status = bce_vhci_cmd_port_status(&vhci->cq, (u8) wIndex, 0, &port_status))) ++ return status; ++ ++ if (port_status & 16) ++ ps->wPortStatus |= USB_PORT_STAT_ENABLE | USB_PORT_STAT_HIGH_SPEED; ++ if (port_status & 4) ++ ps->wPortStatus |= USB_PORT_STAT_CONNECTION; ++ if (port_status & 2) ++ ps->wPortStatus |= USB_PORT_STAT_OVERCURRENT; ++ if (port_status & 8) ++ ps->wPortStatus |= USB_PORT_STAT_RESET; ++ if (port_status & 0x60) ++ ps->wPortStatus |= USB_PORT_STAT_SUSPEND; ++ ++ if (port_status & 0x40000) ++ ps->wPortChange |= USB_PORT_STAT_C_CONNECTION; ++ ++ pr_debug("bce-vhci: Translated status %x to %x:%x\n", port_status, ps->wPortStatus, ps->wPortChange); ++ return 0; ++ } else if (typeReq == SetPortFeature) { ++ if (wValue == USB_PORT_FEAT_POWER) { ++ status = bce_vhci_cmd_port_power_on(&vhci->cq, (u8) wIndex); ++ /* As far as I am aware, power status is not part of the port status so store it separately */ ++ if (!status) ++ vhci->port_power_mask |= BIT(wIndex); ++ return status; ++ } ++ if (wValue == USB_PORT_FEAT_RESET) { ++ return bce_vhci_reset_device(vhci, wIndex, wValue); ++ } ++ if (wValue == USB_PORT_FEAT_SUSPEND) { ++ /* TODO: Am I supposed to also suspend the endpoints? 
*/ ++ pr_debug("bce-vhci: Suspending port %i\n", wIndex); ++ return bce_vhci_cmd_port_suspend(&vhci->cq, (u8) wIndex); ++ } ++ } else if (typeReq == ClearPortFeature) { ++ if (wValue == USB_PORT_FEAT_ENABLE) ++ return bce_vhci_cmd_port_disable(&vhci->cq, (u8) wIndex); ++ if (wValue == USB_PORT_FEAT_POWER) { ++ status = bce_vhci_cmd_port_power_off(&vhci->cq, (u8) wIndex); ++ if (!status) ++ vhci->port_power_mask &= ~BIT(wIndex); ++ return status; ++ } ++ if (wValue == USB_PORT_FEAT_C_CONNECTION) ++ return bce_vhci_cmd_port_status(&vhci->cq, (u8) wIndex, 0x40000, &port_status); ++ if (wValue == USB_PORT_FEAT_C_RESET) { /* I don't think I can transfer it in any way */ ++ return 0; ++ } ++ if (wValue == USB_PORT_FEAT_SUSPEND) { ++ pr_debug("bce-vhci: Resuming port %i\n", wIndex); ++ return bce_vhci_cmd_port_resume(&vhci->cq, (u8) wIndex); ++ } ++ } ++ pr_err("bce-vhci: bce_vhci_hub_control unhandled request: %x %i %i [bufl=%i]\n", typeReq, wValue, wIndex, wLength); ++ dump_stack(); ++ return -EIO; ++} ++ ++static int bce_vhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) ++{ ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ struct bce_vhci_device *vdev; ++ bce_vhci_device_t devid; ++ pr_info("bce_vhci_enable_device\n"); ++ ++ if (vhci->port_to_device[udev->portnum]) ++ return 0; ++ ++ /* We need to early address the device */ ++ if (bce_vhci_cmd_device_create(&vhci->cq, udev->portnum, &devid)) ++ return -EIO; ++ ++ pr_info("bce_vhci_cmd_device_create %i -> %i\n", udev->portnum, devid); ++ ++ vdev = kzalloc(sizeof(struct bce_vhci_device), GFP_KERNEL); ++ vhci->port_to_device[udev->portnum] = devid; ++ vhci->devices[devid] = vdev; ++ ++ bce_vhci_create_transfer_queue(vhci, &vdev->tq[0], &udev->ep0, devid, DMA_BIDIRECTIONAL); ++ udev->ep0.hcpriv = &vdev->tq[0]; ++ vdev->tq_mask |= BIT(0); ++ ++ bce_vhci_cmd_endpoint_create(&vhci->cq, devid, &udev->ep0.desc); ++ return 0; ++} ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,8,0) ++static int 
bce_vhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) ++#else ++static int bce_vhci_address_device(struct usb_hcd *hcd, struct usb_device *udev, unsigned int timeout_ms) //TODO: follow timeout ++#endif ++{ ++ /* This is the same as enable_device, but instead in the old scheme */ ++ return bce_vhci_enable_device(hcd, udev); ++} ++ ++static void bce_vhci_free_device(struct usb_hcd *hcd, struct usb_device *udev) ++{ ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ int i; ++ bce_vhci_device_t devid; ++ struct bce_vhci_device *dev; ++ pr_info("bce_vhci_free_device %i\n", udev->portnum); ++ if (!vhci->port_to_device[udev->portnum]) ++ return; ++ devid = vhci->port_to_device[udev->portnum]; ++ dev = vhci->devices[devid]; ++ for (i = 0; i < 32; i++) { ++ if (dev->tq_mask & BIT(i)) { ++ bce_vhci_transfer_queue_pause(&dev->tq[i], BCE_VHCI_PAUSE_SHUTDOWN); ++ bce_vhci_cmd_endpoint_destroy(&vhci->cq, devid, (u8) i); ++ bce_vhci_destroy_transfer_queue(vhci, &dev->tq[i]); ++ } ++ } ++ vhci->devices[devid] = NULL; ++ vhci->port_to_device[udev->portnum] = 0; ++ bce_vhci_cmd_device_destroy(&vhci->cq, devid); ++ kfree(dev); ++} ++ ++static int bce_vhci_reset_device(struct bce_vhci *vhci, int index, u16 timeout) ++{ ++ struct bce_vhci_device *dev = NULL; ++ bce_vhci_device_t devid; ++ int i; ++ int status; ++ enum dma_data_direction dir; ++ pr_info("bce_vhci_reset_device %i\n", index); ++ ++ devid = vhci->port_to_device[index]; ++ if (devid) { ++ dev = vhci->devices[devid]; ++ ++ for (i = 0; i < 32; i++) { ++ if (dev->tq_mask & BIT(i)) { ++ bce_vhci_transfer_queue_pause(&dev->tq[i], BCE_VHCI_PAUSE_SHUTDOWN); ++ bce_vhci_cmd_endpoint_destroy(&vhci->cq, devid, (u8) i); ++ bce_vhci_destroy_transfer_queue(vhci, &dev->tq[i]); ++ } ++ } ++ vhci->devices[devid] = NULL; ++ vhci->port_to_device[index] = 0; ++ bce_vhci_cmd_device_destroy(&vhci->cq, devid); ++ } ++ status = bce_vhci_cmd_port_reset(&vhci->cq, (u8) index, timeout); ++ ++ if (dev) { ++ if ((status = 
bce_vhci_cmd_device_create(&vhci->cq, index, &devid))) ++ return status; ++ vhci->devices[devid] = dev; ++ vhci->port_to_device[index] = devid; ++ ++ for (i = 0; i < 32; i++) { ++ if (dev->tq_mask & BIT(i)) { ++ dir = usb_endpoint_dir_in(&dev->tq[i].endp->desc) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; ++ if (i == 0) ++ dir = DMA_BIDIRECTIONAL; ++ bce_vhci_create_transfer_queue(vhci, &dev->tq[i], dev->tq[i].endp, devid, dir); ++ bce_vhci_cmd_endpoint_create(&vhci->cq, devid, &dev->tq[i].endp->desc); ++ } ++ } ++ } ++ ++ return status; ++} ++ ++static int bce_vhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ++{ ++ return 0; ++} ++ ++static int bce_vhci_get_frame_number(struct usb_hcd *hcd) ++{ ++ return 0; ++} ++ ++static int bce_vhci_bus_suspend(struct usb_hcd *hcd) ++{ ++ int i, j; ++ int status; ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ pr_info("bce_vhci: suspend started\n"); ++ ++ pr_info("bce_vhci: suspend endpoints\n"); ++ for (i = 0; i < 16; i++) { ++ if (!vhci->port_to_device[i]) ++ continue; ++ for (j = 0; j < 32; j++) { ++ if (!(vhci->devices[vhci->port_to_device[i]]->tq_mask & BIT(j))) ++ continue; ++ bce_vhci_transfer_queue_pause(&vhci->devices[vhci->port_to_device[i]]->tq[j], ++ BCE_VHCI_PAUSE_SUSPEND); ++ } ++ } ++ ++ pr_info("bce_vhci: suspend ports\n"); ++ for (i = 0; i < 16; i++) { ++ if (!vhci->port_to_device[i]) ++ continue; ++ bce_vhci_cmd_port_suspend(&vhci->cq, i); ++ } ++ pr_info("bce_vhci: suspend controller\n"); ++ if ((status = bce_vhci_cmd_controller_pause(&vhci->cq))) ++ return status; ++ ++ bce_vhci_event_queue_pause(&vhci->ev_commands); ++ bce_vhci_event_queue_pause(&vhci->ev_system); ++ bce_vhci_event_queue_pause(&vhci->ev_isochronous); ++ bce_vhci_event_queue_pause(&vhci->ev_interrupt); ++ bce_vhci_event_queue_pause(&vhci->ev_asynchronous); ++ pr_info("bce_vhci: suspend done\n"); ++ return 0; ++} ++ ++static int bce_vhci_bus_resume(struct usb_hcd *hcd) ++{ ++ int i, j; ++ int status; ++ struct bce_vhci *vhci = 
bce_vhci_from_hcd(hcd); ++ pr_info("bce_vhci: resume started\n"); ++ ++ bce_vhci_event_queue_resume(&vhci->ev_system); ++ bce_vhci_event_queue_resume(&vhci->ev_isochronous); ++ bce_vhci_event_queue_resume(&vhci->ev_interrupt); ++ bce_vhci_event_queue_resume(&vhci->ev_asynchronous); ++ bce_vhci_event_queue_resume(&vhci->ev_commands); ++ ++ pr_info("bce_vhci: resume controller\n"); ++ if ((status = bce_vhci_cmd_controller_start(&vhci->cq))) ++ return status; ++ ++ pr_info("bce_vhci: resume ports\n"); ++ for (i = 0; i < 16; i++) { ++ if (!vhci->port_to_device[i]) ++ continue; ++ bce_vhci_cmd_port_resume(&vhci->cq, i); ++ } ++ pr_info("bce_vhci: resume endpoints\n"); ++ for (i = 0; i < 16; i++) { ++ if (!vhci->port_to_device[i]) ++ continue; ++ for (j = 0; j < 32; j++) { ++ if (!(vhci->devices[vhci->port_to_device[i]]->tq_mask & BIT(j))) ++ continue; ++ bce_vhci_transfer_queue_resume(&vhci->devices[vhci->port_to_device[i]]->tq[j], ++ BCE_VHCI_PAUSE_SUSPEND); ++ } ++ } ++ ++ pr_info("bce_vhci: resume done\n"); ++ return 0; ++} ++ ++static int bce_vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) ++{ ++ struct bce_vhci_transfer_queue *q = urb->ep->hcpriv; ++ pr_debug("bce_vhci_urb_enqueue %i:%x\n", q->dev_addr, urb->ep->desc.bEndpointAddress); ++ if (!q) ++ return -ENOENT; ++ return bce_vhci_urb_create(q, urb); ++} ++ ++static int bce_vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) ++{ ++ struct bce_vhci_transfer_queue *q = urb->ep->hcpriv; ++ pr_debug("bce_vhci_urb_dequeue %x\n", urb->ep->desc.bEndpointAddress); ++ return bce_vhci_urb_request_cancel(q, urb, status); ++} ++ ++static void bce_vhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) ++{ ++ struct bce_vhci_transfer_queue *q = ep->hcpriv; ++ pr_debug("bce_vhci_endpoint_reset\n"); ++ if (q) ++ bce_vhci_transfer_queue_request_reset(q); ++} ++ ++static u8 bce_vhci_endpoint_index(u8 addr) ++{ ++ if (addr & 0x80) ++ return (u8) (0x10 + (addr & 0xf)); ++ 
return (u8) (addr & 0xf); ++} ++ ++static int bce_vhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *endp) ++{ ++ u8 endp_index = bce_vhci_endpoint_index(endp->desc.bEndpointAddress); ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ bce_vhci_device_t devid = vhci->port_to_device[udev->portnum]; ++ struct bce_vhci_device *vdev = vhci->devices[devid]; ++ pr_debug("bce_vhci_add_endpoint %x/%x:%x\n", udev->portnum, devid, endp_index); ++ ++ if (udev->bus->root_hub == udev) /* The USB hub */ ++ return 0; ++ if (vdev == NULL) ++ return -ENODEV; ++ if (vdev->tq_mask & BIT(endp_index)) { ++ endp->hcpriv = &vdev->tq[endp_index]; ++ return 0; ++ } ++ ++ bce_vhci_create_transfer_queue(vhci, &vdev->tq[endp_index], endp, devid, ++ usb_endpoint_dir_in(&endp->desc) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); ++ endp->hcpriv = &vdev->tq[endp_index]; ++ vdev->tq_mask |= BIT(endp_index); ++ ++ bce_vhci_cmd_endpoint_create(&vhci->cq, devid, &endp->desc); ++ return 0; ++} ++ ++static int bce_vhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *endp) ++{ ++ u8 endp_index = bce_vhci_endpoint_index(endp->desc.bEndpointAddress); ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ bce_vhci_device_t devid = vhci->port_to_device[udev->portnum]; ++ struct bce_vhci_transfer_queue *q = endp->hcpriv; ++ struct bce_vhci_device *vdev = vhci->devices[devid]; ++ pr_info("bce_vhci_drop_endpoint %x:%x\n", udev->portnum, endp_index); ++ if (!q) { ++ if (vdev && vdev->tq_mask & BIT(endp_index)) { ++ pr_err("something deleted the hcpriv?\n"); ++ q = &vdev->tq[endp_index]; ++ } else { ++ return 0; ++ } ++ } ++ ++ bce_vhci_cmd_endpoint_destroy(&vhci->cq, devid, (u8) (endp->desc.bEndpointAddress & 0x8Fu)); ++ vhci->devices[devid]->tq_mask &= ~BIT(endp_index); ++ bce_vhci_destroy_transfer_queue(vhci, q); ++ return 0; ++} ++ ++static int bce_vhci_create_message_queues(struct bce_vhci *vhci) ++{ ++ if 
(bce_vhci_message_queue_create(vhci, &vhci->msg_commands, "VHC1HostCommands") || ++ bce_vhci_message_queue_create(vhci, &vhci->msg_system, "VHC1HostSystemEvents") || ++ bce_vhci_message_queue_create(vhci, &vhci->msg_isochronous, "VHC1HostIsochronousEvents") || ++ bce_vhci_message_queue_create(vhci, &vhci->msg_interrupt, "VHC1HostInterruptEvents") || ++ bce_vhci_message_queue_create(vhci, &vhci->msg_asynchronous, "VHC1HostAsynchronousEvents")) { ++ bce_vhci_destroy_message_queues(vhci); ++ return -EINVAL; ++ } ++ spin_lock_init(&vhci->msg_asynchronous_lock); ++ bce_vhci_command_queue_create(&vhci->cq, &vhci->msg_commands); ++ return 0; ++} ++ ++static void bce_vhci_destroy_message_queues(struct bce_vhci *vhci) ++{ ++ bce_vhci_command_queue_destroy(&vhci->cq); ++ bce_vhci_message_queue_destroy(vhci, &vhci->msg_commands); ++ bce_vhci_message_queue_destroy(vhci, &vhci->msg_system); ++ bce_vhci_message_queue_destroy(vhci, &vhci->msg_isochronous); ++ bce_vhci_message_queue_destroy(vhci, &vhci->msg_interrupt); ++ bce_vhci_message_queue_destroy(vhci, &vhci->msg_asynchronous); ++} ++ ++static void bce_vhci_handle_system_event(struct bce_vhci_event_queue *q, struct bce_vhci_message *msg); ++static void bce_vhci_handle_usb_event(struct bce_vhci_event_queue *q, struct bce_vhci_message *msg); ++ ++static int bce_vhci_create_event_queues(struct bce_vhci *vhci) ++{ ++ vhci->ev_cq = bce_create_cq(vhci->dev, 0x100); ++ if (!vhci->ev_cq) ++ return -EINVAL; ++#define CREATE_EVENT_QUEUE(field, name, cb) bce_vhci_event_queue_create(vhci, &vhci->field, name, cb) ++ if (__bce_vhci_event_queue_create(vhci, &vhci->ev_commands, "VHC1FirmwareCommands", ++ bce_vhci_firmware_event_completion) || ++ CREATE_EVENT_QUEUE(ev_system, "VHC1FirmwareSystemEvents", bce_vhci_handle_system_event) || ++ CREATE_EVENT_QUEUE(ev_isochronous, "VHC1FirmwareIsochronousEvents", bce_vhci_handle_usb_event) || ++ CREATE_EVENT_QUEUE(ev_interrupt, "VHC1FirmwareInterruptEvents", bce_vhci_handle_usb_event) || ++ 
CREATE_EVENT_QUEUE(ev_asynchronous, "VHC1FirmwareAsynchronousEvents", bce_vhci_handle_usb_event)) { ++ bce_vhci_destroy_event_queues(vhci); ++ return -EINVAL; ++ } ++#undef CREATE_EVENT_QUEUE ++ return 0; ++} ++ ++static void bce_vhci_destroy_event_queues(struct bce_vhci *vhci) ++{ ++ bce_vhci_event_queue_destroy(vhci, &vhci->ev_commands); ++ bce_vhci_event_queue_destroy(vhci, &vhci->ev_system); ++ bce_vhci_event_queue_destroy(vhci, &vhci->ev_isochronous); ++ bce_vhci_event_queue_destroy(vhci, &vhci->ev_interrupt); ++ bce_vhci_event_queue_destroy(vhci, &vhci->ev_asynchronous); ++ if (vhci->ev_cq) ++ bce_destroy_cq(vhci->dev, vhci->ev_cq); ++} ++ ++static void bce_vhci_send_fw_event_response(struct bce_vhci *vhci, struct bce_vhci_message *req, u16 status) ++{ ++ unsigned long timeout = 1000; ++ struct bce_vhci_message r = *req; ++ r.cmd = (u16) (req->cmd | 0x8000u); ++ r.status = status; ++ r.param1 = req->param1; ++ r.param2 = 0; ++ ++ if (bce_reserve_submission(vhci->msg_system.sq, &timeout)) { ++ pr_err("bce-vhci: Cannot reserve submision for FW event reply\n"); ++ return; ++ } ++ bce_vhci_message_queue_write(&vhci->msg_system, &r); ++} ++ ++static int bce_vhci_handle_firmware_event(struct bce_vhci *vhci, struct bce_vhci_message *msg) ++{ ++ unsigned long flags; ++ bce_vhci_device_t devid; ++ u8 endp; ++ struct bce_vhci_device *dev; ++ struct bce_vhci_transfer_queue *tq; ++ if (msg->cmd == BCE_VHCI_CMD_ENDPOINT_REQUEST_STATE || msg->cmd == BCE_VHCI_CMD_ENDPOINT_SET_STATE) { ++ devid = (bce_vhci_device_t) (msg->param1 & 0xff); ++ endp = bce_vhci_endpoint_index((u8) ((msg->param1 >> 8) & 0xff)); ++ dev = vhci->devices[devid]; ++ if (!dev || !(dev->tq_mask & BIT(endp))) ++ return BCE_VHCI_BAD_ARGUMENT; ++ tq = &dev->tq[endp]; ++ } ++ ++ if (msg->cmd == BCE_VHCI_CMD_ENDPOINT_REQUEST_STATE) { ++ if (msg->param2 == BCE_VHCI_ENDPOINT_ACTIVE) { ++ bce_vhci_transfer_queue_resume(tq, BCE_VHCI_PAUSE_FIRMWARE); ++ return BCE_VHCI_SUCCESS; ++ } else if (msg->param2 == 
BCE_VHCI_ENDPOINT_PAUSED) { ++ bce_vhci_transfer_queue_pause(tq, BCE_VHCI_PAUSE_FIRMWARE); ++ return BCE_VHCI_SUCCESS; ++ } ++ return BCE_VHCI_BAD_ARGUMENT; ++ } else if (msg->cmd == BCE_VHCI_CMD_ENDPOINT_SET_STATE) { ++ if (msg->param2 == BCE_VHCI_ENDPOINT_STALLED) { ++ tq->state = msg->param2; ++ spin_lock_irqsave(&tq->urb_lock, flags); ++ tq->stalled = true; ++ spin_unlock_irqrestore(&tq->urb_lock, flags); ++ return BCE_VHCI_SUCCESS; ++ } ++ return BCE_VHCI_BAD_ARGUMENT; ++ } ++ pr_warn("bce-vhci: Unhandled firmware event: %x s=%x p1=%x p2=%llx\n", ++ msg->cmd, msg->status, msg->param1, msg->param2); ++ return BCE_VHCI_BAD_ARGUMENT; ++} ++ ++static void bce_vhci_handle_firmware_events_w(struct work_struct *ws) ++{ ++ size_t cnt = 0; ++ int result; ++ struct bce_vhci *vhci = container_of(ws, struct bce_vhci, w_fw_events); ++ struct bce_queue_sq *sq = vhci->ev_commands.sq; ++ struct bce_sq_completion_data *cq; ++ struct bce_vhci_message *msg, *msg2 = NULL; ++ ++ while (true) { ++ if (msg2) { ++ msg = msg2; ++ msg2 = NULL; ++ } else if ((cq = bce_next_completion(sq))) { ++ if (cq->status == BCE_COMPLETION_ABORTED) { ++ bce_notify_submission_complete(sq); ++ continue; ++ } ++ msg = &vhci->ev_commands.data[sq->head]; ++ } else { ++ break; ++ } ++ ++ pr_debug("bce-vhci: Got fw event: %x s=%x p1=%x p2=%llx\n", msg->cmd, msg->status, msg->param1, msg->param2); ++ if ((cq = bce_next_completion(sq))) { ++ msg2 = &vhci->ev_commands.data[(sq->head + 1) % sq->el_count]; ++ pr_debug("bce-vhci: Got second fw event: %x s=%x p1=%x p2=%llx\n", ++ msg->cmd, msg->status, msg->param1, msg->param2); ++ if (cq->status != BCE_COMPLETION_ABORTED && ++ msg2->cmd == (msg->cmd | 0x4000) && msg2->param1 == msg->param1) { ++ /* Take two elements */ ++ pr_debug("bce-vhci: Cancelled\n"); ++ bce_vhci_send_fw_event_response(vhci, msg, BCE_VHCI_ABORT); ++ ++ bce_notify_submission_complete(sq); ++ bce_notify_submission_complete(sq); ++ msg2 = NULL; ++ cnt += 2; ++ continue; ++ } ++ ++ 
pr_warn("bce-vhci: Handle fw event - unexpected cancellation\n"); ++ } ++ ++ result = bce_vhci_handle_firmware_event(vhci, msg); ++ bce_vhci_send_fw_event_response(vhci, msg, (u16) result); ++ ++ ++ bce_notify_submission_complete(sq); ++ ++cnt; ++ } ++ bce_vhci_event_queue_submit_pending(&vhci->ev_commands, cnt); ++ if (atomic_read(&sq->available_commands) == sq->el_count - 1) { ++ pr_debug("bce-vhci: complete\n"); ++ complete(&vhci->ev_commands.queue_empty_completion); ++ } ++} ++ ++static void bce_vhci_firmware_event_completion(struct bce_queue_sq *sq) ++{ ++ struct bce_vhci_event_queue *q = sq->userdata; ++ queue_work(q->vhci->tq_state_wq, &q->vhci->w_fw_events); ++} ++ ++static void bce_vhci_handle_system_event(struct bce_vhci_event_queue *q, struct bce_vhci_message *msg) ++{ ++ if (msg->cmd & 0x8000) { ++ bce_vhci_command_queue_deliver_completion(&q->vhci->cq, msg); ++ } else { ++ pr_warn("bce-vhci: Unhandled system event: %x s=%x p1=%x p2=%llx\n", ++ msg->cmd, msg->status, msg->param1, msg->param2); ++ } ++} ++ ++static void bce_vhci_handle_usb_event(struct bce_vhci_event_queue *q, struct bce_vhci_message *msg) ++{ ++ bce_vhci_device_t devid; ++ u8 endp; ++ struct bce_vhci_device *dev; ++ if (msg->cmd & 0x8000) { ++ bce_vhci_command_queue_deliver_completion(&q->vhci->cq, msg); ++ } else if (msg->cmd == BCE_VHCI_CMD_TRANSFER_REQUEST || msg->cmd == BCE_VHCI_CMD_CONTROL_TRANSFER_STATUS) { ++ devid = (bce_vhci_device_t) (msg->param1 & 0xff); ++ endp = bce_vhci_endpoint_index((u8) ((msg->param1 >> 8) & 0xff)); ++ dev = q->vhci->devices[devid]; ++ if (!dev || (dev->tq_mask & BIT(endp)) == 0) { ++ pr_err("bce-vhci: Didn't find destination for transfer queue event\n"); ++ return; ++ } ++ bce_vhci_transfer_queue_event(&dev->tq[endp], msg); ++ } else { ++ pr_warn("bce-vhci: Unhandled USB event: %x s=%x p1=%x p2=%llx\n", ++ msg->cmd, msg->status, msg->param1, msg->param2); ++ } ++} ++ ++ ++ ++static const struct hc_driver bce_vhci_driver = { ++ .description = 
"bce-vhci", ++ .product_desc = "BCE VHCI Host Controller", ++ .hcd_priv_size = sizeof(struct bce_vhci *), ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) ++ .flags = HCD_USB2, ++#else ++ .flags = HCD_USB2 | HCD_DMA, ++#endif ++ ++ .start = bce_vhci_start, ++ .stop = bce_vhci_stop, ++ .hub_status_data = bce_vhci_hub_status_data, ++ .hub_control = bce_vhci_hub_control, ++ .urb_enqueue = bce_vhci_urb_enqueue, ++ .urb_dequeue = bce_vhci_urb_dequeue, ++ .enable_device = bce_vhci_enable_device, ++ .free_dev = bce_vhci_free_device, ++ .address_device = bce_vhci_address_device, ++ .add_endpoint = bce_vhci_add_endpoint, ++ .drop_endpoint = bce_vhci_drop_endpoint, ++ .endpoint_reset = bce_vhci_endpoint_reset, ++ .check_bandwidth = bce_vhci_check_bandwidth, ++ .get_frame_number = bce_vhci_get_frame_number, ++ .bus_suspend = bce_vhci_bus_suspend, ++ .bus_resume = bce_vhci_bus_resume ++}; ++ ++ ++int __init bce_vhci_module_init(void) ++{ ++ int result; ++ if ((result = alloc_chrdev_region(&bce_vhci_chrdev, 0, 1, "bce-vhci"))) ++ goto fail_chrdev; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,4,0) ++ bce_vhci_class = class_create(THIS_MODULE, "bce-vhci"); ++#else ++ bce_vhci_class = class_create("bce-vhci"); ++#endif ++ if (IS_ERR(bce_vhci_class)) { ++ result = PTR_ERR(bce_vhci_class); ++ goto fail_class; ++ } ++ return 0; ++ ++fail_class: ++ class_destroy(bce_vhci_class); ++fail_chrdev: ++ unregister_chrdev_region(bce_vhci_chrdev, 1); ++ if (!result) ++ result = -EINVAL; ++ return result; ++} ++void __exit bce_vhci_module_exit(void) ++{ ++ class_destroy(bce_vhci_class); ++ unregister_chrdev_region(bce_vhci_chrdev, 1); ++} ++ ++module_param_named(vhci_port_mask, bce_vhci_port_mask, ushort, 0444); ++MODULE_PARM_DESC(vhci_port_mask, "Specifies which VHCI ports are enabled"); +diff --git a/drivers/staging/apple-bce/vhci/vhci.h b/drivers/staging/apple-bce/vhci/vhci.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/vhci/vhci.h 
+@@ -0,0 +1,52 @@ ++#ifndef BCE_VHCI_H ++#define BCE_VHCI_H ++ ++#include "queue.h" ++#include "transfer.h" ++ ++struct usb_hcd; ++struct bce_queue_cq; ++ ++struct bce_vhci_device { ++ struct bce_vhci_transfer_queue tq[32]; ++ u32 tq_mask; ++}; ++struct bce_vhci { ++ struct apple_bce_device *dev; ++ dev_t vdevt; ++ struct device *vdev; ++ struct usb_hcd *hcd; ++ struct spinlock hcd_spinlock; ++ struct bce_vhci_message_queue msg_commands; ++ struct bce_vhci_message_queue msg_system; ++ struct bce_vhci_message_queue msg_isochronous; ++ struct bce_vhci_message_queue msg_interrupt; ++ struct bce_vhci_message_queue msg_asynchronous; ++ struct spinlock msg_asynchronous_lock; ++ struct bce_vhci_command_queue cq; ++ struct bce_queue_cq *ev_cq; ++ struct bce_vhci_event_queue ev_commands; ++ struct bce_vhci_event_queue ev_system; ++ struct bce_vhci_event_queue ev_isochronous; ++ struct bce_vhci_event_queue ev_interrupt; ++ struct bce_vhci_event_queue ev_asynchronous; ++ u16 port_mask; ++ u8 port_count; ++ u16 port_power_mask; ++ bce_vhci_device_t port_to_device[16]; ++ struct bce_vhci_device *devices[16]; ++ struct workqueue_struct *tq_state_wq; ++ struct work_struct w_fw_events; ++}; ++ ++int __init bce_vhci_module_init(void); ++void __exit bce_vhci_module_exit(void); ++ ++int bce_vhci_create(struct apple_bce_device *dev, struct bce_vhci *vhci); ++void bce_vhci_destroy(struct bce_vhci *vhci); ++int bce_vhci_start(struct usb_hcd *hcd); ++void bce_vhci_stop(struct usb_hcd *hcd); ++ ++struct bce_vhci *bce_vhci_from_hcd(struct usb_hcd *hcd); ++ ++#endif //BCE_VHCI_H +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/1002-Put-apple-bce-in-drivers-staging.patch b/patch/kernel/archive/uefi-x86-6.19/1002-Put-apple-bce-in-drivers-staging.patch new file mode 100644 index 000000000000..b987bfe73fbd --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/1002-Put-apple-bce-in-drivers-staging.patch @@ -0,0 +1,77 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 
00:00:00 2001 +From: Redecorating <69827514+Redecorating@users.noreply.github.com> +Date: Mon, 7 Nov 2022 14:56:34 +0530 +Subject: Put apple-bce in drivers/staging + +- rpardini: 6.12: in drivers/staging/Makefile do it at the top to avoid + conflicts with Armbian's (wifi?) patching. + +Signed-off-by: Ricardo Pardini +--- + drivers/staging/Kconfig | 2 ++ + drivers/staging/Makefile | 1 + + drivers/staging/apple-bce/Kconfig | 18 ++++++++++ + drivers/staging/apple-bce/Makefile | 2 +- + 4 files changed, 22 insertions(+), 1 deletion(-) + +diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig +index 111111111111..222222222222 100644 +--- a/drivers/staging/Kconfig ++++ b/drivers/staging/Kconfig +@@ -50,4 +50,6 @@ source "drivers/staging/vme_user/Kconfig" + + source "drivers/staging/rtl8723cs/Kconfig" + ++source "drivers/staging/apple-bce/Kconfig" ++ + endif # STAGING +diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile +index 111111111111..222222222222 100644 +--- a/drivers/staging/Makefile ++++ b/drivers/staging/Makefile +@@ -2,6 +2,7 @@ + # Makefile for staging directory + + obj-y += media/ ++obj-$(CONFIG_APPLE_BCE) += apple-bce/ + obj-$(CONFIG_RTL8723BS) += rtl8723bs/ + obj-$(CONFIG_OCTEON_ETHERNET) += octeon/ + obj-$(CONFIG_VME_BUS) += vme_user/ +diff --git a/drivers/staging/apple-bce/Kconfig b/drivers/staging/apple-bce/Kconfig +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/staging/apple-bce/Kconfig +@@ -0,0 +1,18 @@ ++config APPLE_BCE ++ tristate "Apple BCE driver (VHCI and Audio support)" ++ default m ++ depends on X86 ++ select SOUND ++ select SND ++ select SND_PCM ++ select SND_JACK ++ help ++ VHCI and audio support on Apple MacBooks with the T2 Chip. ++ This driver is divided in three components: ++ - BCE (Buffer Copy Engine): which establishes a basic communication ++ channel with the T2 chip. 
This component is required by the other two: ++ - VHCI (Virtual Host Controller Interface): Access to keyboard, mouse ++ and other system devices depend on this virtual USB host controller ++ - Audio: a driver for the T2 audio interface. ++ ++ If "M" is selected, the module will be called apple-bce.' +diff --git a/drivers/staging/apple-bce/Makefile b/drivers/staging/apple-bce/Makefile +index 111111111111..222222222222 100644 +--- a/drivers/staging/apple-bce/Makefile ++++ b/drivers/staging/apple-bce/Makefile +@@ -1,5 +1,5 @@ + modname := apple-bce +-obj-m += $(modname).o ++obj-$(CONFIG_APPLE_BCE) += $(modname).o + + apple-bce-objs := apple_bce.o mailbox.o queue.o queue_dma.o vhci/vhci.o vhci/queue.o vhci/transfer.o audio/audio.o audio/protocol.o audio/protocol_bce.o audio/pcm.o + +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/1003-Fix-freezing-on-turning-off-camera.patch b/patch/kernel/archive/uefi-x86-6.19/1003-Fix-freezing-on-turning-off-camera.patch new file mode 100644 index 000000000000..f4f746a4d19f --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/1003-Fix-freezing-on-turning-off-camera.patch @@ -0,0 +1,72 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: mnural +Date: Mon, 14 Apr 2025 14:44:20 +0530 +Subject: Fix freezing on turning off camera + +Detailed logs and reason behind can be seen here: +https://github.com/t2linux/T2-Debian-and-Ubuntu-Kernel/issues/130#issuecomment-2799130835 +--- + drivers/staging/apple-bce/vhci/transfer.c | 16 ++++++++-- + drivers/staging/apple-bce/vhci/transfer.h | 4 ++- + 2 files changed, 17 insertions(+), 3 deletions(-) + +diff --git a/drivers/staging/apple-bce/vhci/transfer.c b/drivers/staging/apple-bce/vhci/transfer.c +index 111111111111..222222222222 100644 +--- a/drivers/staging/apple-bce/vhci/transfer.c ++++ b/drivers/staging/apple-bce/vhci/transfer.c +@@ -400,6 +400,7 @@ int bce_vhci_urb_request_cancel(struct bce_vhci_transfer_queue *q, struct urb *u + struct 
bce_vhci_urb *vurb; + unsigned long flags; + int ret; ++ enum bce_vhci_urb_state old_state; + + spin_lock_irqsave(&q->urb_lock, flags); + if ((ret = usb_hcd_check_unlink_urb(q->vhci->hcd, urb, status))) { +@@ -408,8 +409,19 @@ int bce_vhci_urb_request_cancel(struct bce_vhci_transfer_queue *q, struct urb *u + } + + vurb = urb->hcpriv; ++ ++ old_state = vurb->state; /* save old state to use later because we'll set state as cancelled */ ++ ++ if (old_state == BCE_VHCI_URB_CANCELLED) { ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ pr_debug("bce-vhci: URB %p is already cancelled, skipping\n", urb); ++ return 0; ++ } ++ ++ vurb->state = BCE_VHCI_URB_CANCELLED; ++ + /* If the URB wasn't posted to the device yet, we can still remove it on the host without pausing the queue. */ +- if (vurb->state != BCE_VHCI_URB_INIT_PENDING) { ++ if (old_state != BCE_VHCI_URB_INIT_PENDING) { + pr_debug("bce-vhci: [%02x] Cancelling URB\n", q->endp_addr); + + spin_unlock_irqrestore(&q->urb_lock, flags); +@@ -425,7 +437,7 @@ int bce_vhci_urb_request_cancel(struct bce_vhci_transfer_queue *q, struct urb *u + + usb_hcd_giveback_urb(q->vhci->hcd, urb, status); + +- if (vurb->state != BCE_VHCI_URB_INIT_PENDING) ++ if (old_state != BCE_VHCI_URB_INIT_PENDING) + bce_vhci_transfer_queue_resume(q, BCE_VHCI_PAUSE_INTERNAL_WQ); + + kfree(vurb); +diff --git a/drivers/staging/apple-bce/vhci/transfer.h b/drivers/staging/apple-bce/vhci/transfer.h +index 111111111111..222222222222 100644 +--- a/drivers/staging/apple-bce/vhci/transfer.h ++++ b/drivers/staging/apple-bce/vhci/transfer.h +@@ -44,7 +44,9 @@ enum bce_vhci_urb_state { + + BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_REQUEST, + BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_COMPLETION, +- BCE_VHCI_URB_CONTROL_COMPLETE ++ BCE_VHCI_URB_CONTROL_COMPLETE, ++ ++ BCE_VHCI_URB_CANCELLED + }; + struct bce_vhci_urb { + struct urb *urb; +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/2008-i915-4-lane-quirk-for-mbp15-1.patch 
b/patch/kernel/archive/uefi-x86-6.19/2008-i915-4-lane-quirk-for-mbp15-1.patch new file mode 100644 index 000000000000..c78df8887bca --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/2008-i915-4-lane-quirk-for-mbp15-1.patch @@ -0,0 +1,76 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Orlando Chamberlain +Date: Fri, 28 Jun 2024 04:43:50 +0000 +Subject: i915: 4 lane quirk for mbp15,1 + +Needed to use iGPU when dGPU was boot GPU + +Patch written by Kerem Karabay +--- + drivers/gpu/drm/i915/display/intel_ddi.c | 3 ++ + drivers/gpu/drm/i915/display/intel_quirks.c | 15 ++++++++++ + drivers/gpu/drm/i915/display/intel_quirks.h | 1 + + 3 files changed, 19 insertions(+) + +diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c +index 111111111111..222222222222 100644 +--- a/drivers/gpu/drm/i915/display/intel_ddi.c ++++ b/drivers/gpu/drm/i915/display/intel_ddi.c +@@ -4890,6 +4890,9 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port) + if (dig_port->ddi_a_4_lanes) + return false; + ++ if (intel_has_quirk(display, QUIRK_DDI_A_FORCE_4_LANES)) ++ return true; ++ + /* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only + * supported configuration + */ +diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c +index 111111111111..222222222222 100644 +--- a/drivers/gpu/drm/i915/display/intel_quirks.c ++++ b/drivers/gpu/drm/i915/display/intel_quirks.c +@@ -66,6 +66,18 @@ static void quirk_increase_ddi_disabled_time(struct intel_display *display) + drm_info(display->drm, "Applying Increase DDI Disabled quirk\n"); + } + ++/* ++ * In some cases, the firmware might not set the lane count to 4 (for example, ++ * when booting in some dual GPU Macs with the dGPU as the default GPU), this ++ * quirk is used to force it as otherwise it might not be possible to compute a ++ * valid link configuration. 
++ */ ++static void quirk_ddi_a_force_4_lanes(struct intel_display *display) ++{ ++ intel_set_quirk(display, QUIRK_DDI_A_FORCE_4_LANES); ++ drm_info(display->drm, "Applying DDI A Forced 4 Lanes quirk\n"); ++} ++ + static void quirk_no_pps_backlight_power_hook(struct intel_display *display) + { + intel_set_quirk(display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK); +@@ -240,6 +252,9 @@ static struct intel_quirk intel_quirks[] = { + + /* Dell XPS 13 7390 2-in-1 */ + { 0x8a12, 0x1028, 0x08b0, quirk_edp_limit_rate_hbr2 }, ++ ++ /* Apple MacBookPro15,1 */ ++ { 0x3e9b, 0x106b, 0x0176, quirk_ddi_a_force_4_lanes }, + }; + + static const struct intel_dpcd_quirk intel_dpcd_quirks[] = { +diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h +index 111111111111..222222222222 100644 +--- a/drivers/gpu/drm/i915/display/intel_quirks.h ++++ b/drivers/gpu/drm/i915/display/intel_quirks.h +@@ -21,6 +21,7 @@ enum intel_quirk_id { + QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK, + QUIRK_FW_SYNC_LEN, + QUIRK_EDP_LIMIT_RATE_HBR2, ++ QUIRK_DDI_A_FORCE_4_LANES, + }; + + void intel_init_quirks(struct intel_display *display); +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/2009-apple-gmux-allow-switching-to-igpu-at-probe.patch b/patch/kernel/archive/uefi-x86-6.19/2009-apple-gmux-allow-switching-to-igpu-at-probe.patch new file mode 100644 index 000000000000..2527b1b82227 --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/2009-apple-gmux-allow-switching-to-igpu-at-probe.patch @@ -0,0 +1,109 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Orlando Chamberlain +Date: Fri, 10 Feb 2023 22:45:00 +1100 +Subject: apple-gmux: allow switching to igpu at probe + +This means user don't need to set the gpu-power-prefs efivar to use the +igpu while runtime switching isn't working, so macOS will be unaffected. 
+ +This isn't really upstreamable, what we want upstream is the ability to +switch at runtime (so both gpus need to be able to probe the eDP panel). + +Based off of work by Kerem Karabay +--- + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++ + drivers/gpu/vga/vga_switcheroo.c | 7 +--- + drivers/pci/vgaarb.c | 1 + + drivers/platform/x86/apple-gmux.c | 18 ++++++++++ + 4 files changed, 23 insertions(+), 6 deletions(-) + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +index 111111111111..222222222222 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +@@ -2382,6 +2382,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, + int ret, retry = 0, i; + bool supports_atomic = false; + ++ if (vga_switcheroo_client_probe_defer(pdev)) ++ return -EPROBE_DEFER; ++ + if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA || + (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) { + if (drm_firmware_drivers_only() && amdgpu_modeset == -1) +diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c +index 111111111111..222222222222 100644 +--- a/drivers/gpu/vga/vga_switcheroo.c ++++ b/drivers/gpu/vga/vga_switcheroo.c +@@ -438,12 +438,7 @@ find_active_client(struct list_head *head) + bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) + { + if (pci_is_display(pdev)) { +- /* +- * apple-gmux is needed on pre-retina MacBook Pro +- * to probe the panel if pdev is the inactive GPU. 
+- */ +- if (apple_gmux_present() && pdev != vga_default_device() && +- !vgasr_priv.handler_flags ++ if (apple_gmux_present() && !vgasr_priv.handler_flags) + return true; + } + +diff --git a/drivers/pci/vgaarb.c b/drivers/pci/vgaarb.c +index 111111111111..222222222222 100644 +--- a/drivers/pci/vgaarb.c ++++ b/drivers/pci/vgaarb.c +@@ -143,6 +143,7 @@ void vga_set_default_device(struct pci_dev *pdev) + pci_dev_put(vga_default); + vga_default = pci_dev_get(pdev); + } ++EXPORT_SYMBOL_GPL(vga_set_default_device); + + /** + * vga_remove_vgacon - deactivate VGA console +diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c +index 111111111111..222222222222 100644 +--- a/drivers/platform/x86/apple-gmux.c ++++ b/drivers/platform/x86/apple-gmux.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -107,6 +108,10 @@ struct apple_gmux_config { + + # define MMIO_GMUX_MAX_BRIGHTNESS 0xffff + ++static bool force_igd; ++module_param(force_igd, bool, 0); ++MODULE_PARM_DESC(force_igd, "Switch gpu to igd on module load. Make sure that you have apple-set-os set up and the iGPU is in `lspci -s 00:02.0`. (default: false) (bool)"); ++ + static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port) + { + return inb(gmux_data->iostart + port); +@@ -945,6 +950,19 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) + gmux_enable_interrupts(gmux_data); + gmux_read_switch_state(gmux_data); + ++ if (force_igd) { ++ struct pci_dev *pdev; ++ ++ pdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(2, 0)); ++ if (pdev) { ++ pr_info("Switching to IGD"); ++ gmux_switchto(VGA_SWITCHEROO_IGD); ++ vga_set_default_device(pdev); ++ } else { ++ pr_err("force_igd is true, but couldn't find iGPU at 00:02.0! Is apple-set-os working?"); ++ } ++ } ++ + /* + * Retina MacBook Pros cannot switch the panel's AUX separately + * and need eDP pre-calibration. 
They are distinguishable from +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/3001-applesmc-convert-static-structures-to-drvdata.patch b/patch/kernel/archive/uefi-x86-6.19/3001-applesmc-convert-static-structures-to-drvdata.patch new file mode 100644 index 000000000000..17c687ecb42e --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/3001-applesmc-convert-static-structures-to-drvdata.patch @@ -0,0 +1,1218 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Paul Pawlowski +Date: Sun, 17 Nov 2019 23:12:55 +0100 +Subject: applesmc: convert static structures to drvdata + +All static data structures have been moved to an applesmc_device struct, +which is then associated with the platform device. +This change is intended to ease the migration to an acpi_device, where +static data would preferably be avoided. + +Signed-off-by: Aun-Ali Zaidi +--- + drivers/hwmon/applesmc.c | 540 ++++++---- + 1 file changed, 319 insertions(+), 221 deletions(-) + +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c +index 111111111111..222222222222 100644 +--- a/drivers/hwmon/applesmc.c ++++ b/drivers/hwmon/applesmc.c +@@ -6,6 +6,7 @@ + * + * Copyright (C) 2007 Nicolas Boichat + * Copyright (C) 2010 Henrik Rydberg ++ * Copyright (C) 2019 Paul Pawlowski + * + * Based on hdaps.c driver: + * Copyright (C) 2005 Robert Love +@@ -119,7 +120,7 @@ struct applesmc_entry { + }; + + /* Register lookup and registers common to all SMCs */ +-static struct applesmc_registers { ++struct applesmc_registers { + struct mutex mutex; /* register read/write mutex */ + unsigned int key_count; /* number of SMC registers */ + unsigned int fan_count; /* number of fans */ +@@ -133,26 +134,32 @@ static struct applesmc_registers { + bool init_complete; /* true when fully initialized */ + struct applesmc_entry *cache; /* cached key entries */ + const char **index; /* temperature key index */ +-} smcreg = { +- .mutex = __MUTEX_INITIALIZER(smcreg.mutex), + }; + 
+-static const int debug; +-static struct platform_device *pdev; +-static s16 rest_x; +-static s16 rest_y; +-static u8 backlight_state[2]; ++struct applesmc_device { ++ struct platform_device *dev; ++ struct applesmc_registers reg; + +-static struct device *hwmon_dev; +-static struct input_dev *applesmc_idev; ++ s16 rest_x; ++ s16 rest_y; + +-/* +- * Last index written to key_at_index sysfs file, and value to use for all other +- * key_at_index_* sysfs files. +- */ +-static unsigned int key_at_index; ++ u8 backlight_state[2]; ++ ++ struct device *hwmon_dev; ++ struct input_dev *idev; ++ ++ /* ++ * Last index written to key_at_index sysfs file, and value to use for all other ++ * key_at_index_* sysfs files. ++ */ ++ unsigned int key_at_index; ++ ++ struct workqueue_struct *backlight_wq; ++ struct work_struct backlight_work; ++ struct led_classdev backlight_dev; ++}; + +-static struct workqueue_struct *applesmc_led_wq; ++static const int debug; + + /* + * Wait for specific status bits with a mask on the SMC. +@@ -338,36 +345,37 @@ static int read_register_count(unsigned int *count) + * All functions below are concurrency safe - callers should NOT hold lock. 
+ */ + +-static int applesmc_read_entry(const struct applesmc_entry *entry, +- u8 *buf, u8 len) ++static int applesmc_read_entry(struct applesmc_device *smc, ++ const struct applesmc_entry *entry, u8 *buf, u8 len) + { + int ret; + + if (entry->len != len) + return -EINVAL; +- mutex_lock(&smcreg.mutex); ++ mutex_lock(&smc->reg.mutex); + ret = read_smc(APPLESMC_READ_CMD, entry->key, buf, len); +- mutex_unlock(&smcreg.mutex); ++ mutex_unlock(&smc->reg.mutex); + + return ret; + } + +-static int applesmc_write_entry(const struct applesmc_entry *entry, +- const u8 *buf, u8 len) ++static int applesmc_write_entry(struct applesmc_device *smc, ++ const struct applesmc_entry *entry, const u8 *buf, u8 len) + { + int ret; + + if (entry->len != len) + return -EINVAL; +- mutex_lock(&smcreg.mutex); ++ mutex_lock(&smc->reg.mutex); + ret = write_smc(APPLESMC_WRITE_CMD, entry->key, buf, len); +- mutex_unlock(&smcreg.mutex); ++ mutex_unlock(&smc->reg.mutex); + return ret; + } + +-static const struct applesmc_entry *applesmc_get_entry_by_index(int index) ++static const struct applesmc_entry *applesmc_get_entry_by_index( ++ struct applesmc_device *smc, int index) + { +- struct applesmc_entry *cache = &smcreg.cache[index]; ++ struct applesmc_entry *cache = &smc->reg.cache[index]; + u8 key[4], info[6]; + __be32 be; + int ret = 0; +@@ -375,7 +383,7 @@ static const struct applesmc_entry *applesmc_get_entry_by_index(int index) + if (cache->valid) + return cache; + +- mutex_lock(&smcreg.mutex); ++ mutex_lock(&smc->reg.mutex); + + if (cache->valid) + goto out; +@@ -394,20 +402,21 @@ static const struct applesmc_entry *applesmc_get_entry_by_index(int index) + cache->valid = true; + + out: +- mutex_unlock(&smcreg.mutex); ++ mutex_unlock(&smc->reg.mutex); + if (ret) + return ERR_PTR(ret); + return cache; + } + +-static int applesmc_get_lower_bound(unsigned int *lo, const char *key) ++static int applesmc_get_lower_bound(struct applesmc_device *smc, ++ unsigned int *lo, const char *key) + { +- int 
begin = 0, end = smcreg.key_count; ++ int begin = 0, end = smc->reg.key_count; + const struct applesmc_entry *entry; + + while (begin != end) { + int middle = begin + (end - begin) / 2; +- entry = applesmc_get_entry_by_index(middle); ++ entry = applesmc_get_entry_by_index(smc, middle); + if (IS_ERR(entry)) { + *lo = 0; + return PTR_ERR(entry); +@@ -422,16 +431,17 @@ static int applesmc_get_lower_bound(unsigned int *lo, const char *key) + return 0; + } + +-static int applesmc_get_upper_bound(unsigned int *hi, const char *key) ++static int applesmc_get_upper_bound(struct applesmc_device *smc, ++ unsigned int *hi, const char *key) + { +- int begin = 0, end = smcreg.key_count; ++ int begin = 0, end = smc->reg.key_count; + const struct applesmc_entry *entry; + + while (begin != end) { + int middle = begin + (end - begin) / 2; +- entry = applesmc_get_entry_by_index(middle); ++ entry = applesmc_get_entry_by_index(smc, middle); + if (IS_ERR(entry)) { +- *hi = smcreg.key_count; ++ *hi = smc->reg.key_count; + return PTR_ERR(entry); + } + if (strcmp(key, entry->key) < 0) +@@ -444,50 +454,54 @@ static int applesmc_get_upper_bound(unsigned int *hi, const char *key) + return 0; + } + +-static const struct applesmc_entry *applesmc_get_entry_by_key(const char *key) ++static const struct applesmc_entry *applesmc_get_entry_by_key( ++ struct applesmc_device *smc, const char *key) + { + int begin, end; + int ret; + +- ret = applesmc_get_lower_bound(&begin, key); ++ ret = applesmc_get_lower_bound(smc, &begin, key); + if (ret) + return ERR_PTR(ret); +- ret = applesmc_get_upper_bound(&end, key); ++ ret = applesmc_get_upper_bound(smc, &end, key); + if (ret) + return ERR_PTR(ret); + if (end - begin != 1) + return ERR_PTR(-EINVAL); + +- return applesmc_get_entry_by_index(begin); ++ return applesmc_get_entry_by_index(smc, begin); + } + +-static int applesmc_read_key(const char *key, u8 *buffer, u8 len) ++static int applesmc_read_key(struct applesmc_device *smc, ++ const char *key, u8 
*buffer, u8 len) + { + const struct applesmc_entry *entry; + +- entry = applesmc_get_entry_by_key(key); ++ entry = applesmc_get_entry_by_key(smc, key); + if (IS_ERR(entry)) + return PTR_ERR(entry); + +- return applesmc_read_entry(entry, buffer, len); ++ return applesmc_read_entry(smc, entry, buffer, len); + } + +-static int applesmc_write_key(const char *key, const u8 *buffer, u8 len) ++static int applesmc_write_key(struct applesmc_device *smc, ++ const char *key, const u8 *buffer, u8 len) + { + const struct applesmc_entry *entry; + +- entry = applesmc_get_entry_by_key(key); ++ entry = applesmc_get_entry_by_key(smc, key); + if (IS_ERR(entry)) + return PTR_ERR(entry); + +- return applesmc_write_entry(entry, buffer, len); ++ return applesmc_write_entry(smc, entry, buffer, len); + } + +-static int applesmc_has_key(const char *key, bool *value) ++static int applesmc_has_key(struct applesmc_device *smc, ++ const char *key, bool *value) + { + const struct applesmc_entry *entry; + +- entry = applesmc_get_entry_by_key(key); ++ entry = applesmc_get_entry_by_key(smc, key); + if (IS_ERR(entry) && PTR_ERR(entry) != -EINVAL) + return PTR_ERR(entry); + +@@ -498,12 +512,13 @@ static int applesmc_has_key(const char *key, bool *value) + /* + * applesmc_read_s16 - Read 16-bit signed big endian register + */ +-static int applesmc_read_s16(const char *key, s16 *value) ++static int applesmc_read_s16(struct applesmc_device *smc, ++ const char *key, s16 *value) + { + u8 buffer[2]; + int ret; + +- ret = applesmc_read_key(key, buffer, 2); ++ ret = applesmc_read_key(smc, key, buffer, 2); + if (ret) + return ret; + +@@ -514,28 +529,29 @@ static int applesmc_read_s16(const char *key, s16 *value) + /* + * applesmc_device_init - initialize the accelerometer. Can sleep. 
+ */ +-static void applesmc_device_init(void) ++static void applesmc_device_init(struct applesmc_device *smc) + { + int total; + u8 buffer[2]; + +- if (!smcreg.has_accelerometer) ++ if (!smc->reg.has_accelerometer) + return; + + for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) { +- if (!applesmc_read_key(MOTION_SENSOR_KEY, buffer, 2) && ++ if (!applesmc_read_key(smc, MOTION_SENSOR_KEY, buffer, 2) && + (buffer[0] != 0x00 || buffer[1] != 0x00)) + return; + buffer[0] = 0xe0; + buffer[1] = 0x00; +- applesmc_write_key(MOTION_SENSOR_KEY, buffer, 2); ++ applesmc_write_key(smc, MOTION_SENSOR_KEY, buffer, 2); + msleep(INIT_WAIT_MSECS); + } + + pr_warn("failed to init the device\n"); + } + +-static int applesmc_init_index(struct applesmc_registers *s) ++static int applesmc_init_index(struct applesmc_device *smc, ++ struct applesmc_registers *s) + { + const struct applesmc_entry *entry; + unsigned int i; +@@ -548,7 +564,7 @@ static int applesmc_init_index(struct applesmc_registers *s) + return -ENOMEM; + + for (i = s->temp_begin; i < s->temp_end; i++) { +- entry = applesmc_get_entry_by_index(i); ++ entry = applesmc_get_entry_by_index(smc, i); + if (IS_ERR(entry)) + continue; + if (strcmp(entry->type, TEMP_SENSOR_TYPE)) +@@ -562,9 +578,9 @@ static int applesmc_init_index(struct applesmc_registers *s) + /* + * applesmc_init_smcreg_try - Try to initialize register cache. Idempotent. 
+ */ +-static int applesmc_init_smcreg_try(void) ++static int applesmc_init_smcreg_try(struct applesmc_device *smc) + { +- struct applesmc_registers *s = &smcreg; ++ struct applesmc_registers *s = &smc->reg; + bool left_light_sensor = false, right_light_sensor = false; + unsigned int count; + u8 tmp[1]; +@@ -590,35 +606,35 @@ static int applesmc_init_smcreg_try(void) + if (!s->cache) + return -ENOMEM; + +- ret = applesmc_read_key(FANS_COUNT, tmp, 1); ++ ret = applesmc_read_key(smc, FANS_COUNT, tmp, 1); + if (ret) + return ret; + s->fan_count = tmp[0]; + if (s->fan_count > 10) + s->fan_count = 10; + +- ret = applesmc_get_lower_bound(&s->temp_begin, "T"); ++ ret = applesmc_get_lower_bound(smc, &s->temp_begin, "T"); + if (ret) + return ret; +- ret = applesmc_get_lower_bound(&s->temp_end, "U"); ++ ret = applesmc_get_lower_bound(smc, &s->temp_end, "U"); + if (ret) + return ret; + s->temp_count = s->temp_end - s->temp_begin; + +- ret = applesmc_init_index(s); ++ ret = applesmc_init_index(smc, s); + if (ret) + return ret; + +- ret = applesmc_has_key(LIGHT_SENSOR_LEFT_KEY, &left_light_sensor); ++ ret = applesmc_has_key(smc, LIGHT_SENSOR_LEFT_KEY, &left_light_sensor); + if (ret) + return ret; +- ret = applesmc_has_key(LIGHT_SENSOR_RIGHT_KEY, &right_light_sensor); ++ ret = applesmc_has_key(smc, LIGHT_SENSOR_RIGHT_KEY, &right_light_sensor); + if (ret) + return ret; +- ret = applesmc_has_key(MOTION_SENSOR_KEY, &s->has_accelerometer); ++ ret = applesmc_has_key(smc, MOTION_SENSOR_KEY, &s->has_accelerometer); + if (ret) + return ret; +- ret = applesmc_has_key(BACKLIGHT_KEY, &s->has_key_backlight); ++ ret = applesmc_has_key(smc, BACKLIGHT_KEY, &s->has_key_backlight); + if (ret) + return ret; + +@@ -634,13 +650,13 @@ static int applesmc_init_smcreg_try(void) + return 0; + } + +-static void applesmc_destroy_smcreg(void) ++static void applesmc_destroy_smcreg(struct applesmc_device *smc) + { +- kfree(smcreg.index); +- smcreg.index = NULL; +- kfree(smcreg.cache); +- smcreg.cache = 
NULL; +- smcreg.init_complete = false; ++ kfree(smc->reg.index); ++ smc->reg.index = NULL; ++ kfree(smc->reg.cache); ++ smc->reg.cache = NULL; ++ smc->reg.init_complete = false; + } + + /* +@@ -649,12 +665,12 @@ static void applesmc_destroy_smcreg(void) + * Retries until initialization is successful, or the operation times out. + * + */ +-static int applesmc_init_smcreg(void) ++static int applesmc_init_smcreg(struct applesmc_device *smc) + { + int ms, ret; + + for (ms = 0; ms < INIT_TIMEOUT_MSECS; ms += INIT_WAIT_MSECS) { +- ret = applesmc_init_smcreg_try(); ++ ret = applesmc_init_smcreg_try(smc); + if (!ret) { + if (ms) + pr_info("init_smcreg() took %d ms\n", ms); +@@ -663,21 +679,58 @@ static int applesmc_init_smcreg(void) + msleep(INIT_WAIT_MSECS); + } + +- applesmc_destroy_smcreg(); ++ applesmc_destroy_smcreg(smc); + + return ret; + } + + /* Device model stuff */ ++static int applesmc_create_modules(struct applesmc_device *smc); ++static void applesmc_destroy_modules(struct applesmc_device *smc); + static int applesmc_probe(struct platform_device *dev) + { ++ struct applesmc_device *smc; + int ret; + +- ret = applesmc_init_smcreg(); ++ smc = kzalloc(sizeof(struct applesmc_device), GFP_KERNEL); ++ if (!smc) ++ return -ENOMEM; ++ smc->dev = dev; ++ mutex_init(&smc->reg.mutex); ++ ++ platform_set_drvdata(dev, smc); ++ ++ ret = applesmc_init_smcreg(smc); + if (ret) +- return ret; ++ goto out_mem; ++ ++ applesmc_device_init(smc); ++ ++ ret = applesmc_create_modules(smc); ++ if (ret) ++ goto out_reg; ++ ++ return 0; ++ ++out_reg: ++ applesmc_destroy_smcreg(smc); ++out_mem: ++ platform_set_drvdata(dev, NULL); ++ mutex_destroy(&smc->reg.mutex); ++ kfree(smc); + +- applesmc_device_init(); ++ return ret; ++} ++ ++static int applesmc_remove(struct platform_device *dev) ++{ ++ struct applesmc_device *smc = platform_get_drvdata(dev); ++ ++ applesmc_destroy_modules(smc); ++ applesmc_destroy_smcreg(smc); ++ ++ mutex_destroy(&smc->reg.mutex); ++ kfree(smc); + + return 0; + } 
+@@ -685,15 +738,21 @@ static int applesmc_probe(struct platform_device *dev) + /* Synchronize device with memorized backlight state */ + static int applesmc_pm_resume(struct device *dev) + { +- if (smcreg.has_key_backlight) +- applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2); ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ ++ if (smc->reg.has_key_backlight) ++ applesmc_write_key(smc, BACKLIGHT_KEY, smc->backlight_state, 2); ++ + return 0; + } + + /* Reinitialize device on resume from hibernation */ + static int applesmc_pm_restore(struct device *dev) + { +- applesmc_device_init(); ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ ++ applesmc_device_init(smc); ++ + return applesmc_pm_resume(dev); + } + +@@ -704,6 +763,7 @@ static const struct dev_pm_ops applesmc_pm_ops = { + + static struct platform_driver applesmc_driver = { + .probe = applesmc_probe, ++ .remove = applesmc_remove, + .driver = { + .name = "applesmc", + .pm = &applesmc_pm_ops, +@@ -714,25 +774,26 @@ static struct platform_driver applesmc_driver = { + * applesmc_calibrate - Set our "resting" values. Callers must + * hold applesmc_lock. 
+ */ +-static void applesmc_calibrate(void) ++static void applesmc_calibrate(struct applesmc_device *smc) + { +- applesmc_read_s16(MOTION_SENSOR_X_KEY, &rest_x); +- applesmc_read_s16(MOTION_SENSOR_Y_KEY, &rest_y); +- rest_x = -rest_x; ++ applesmc_read_s16(smc, MOTION_SENSOR_X_KEY, &smc->rest_x); ++ applesmc_read_s16(smc, MOTION_SENSOR_Y_KEY, &smc->rest_y); ++ smc->rest_x = -smc->rest_x; + } + + static void applesmc_idev_poll(struct input_dev *idev) + { ++ struct applesmc_device *smc = dev_get_drvdata(&idev->dev); + s16 x, y; + +- if (applesmc_read_s16(MOTION_SENSOR_X_KEY, &x)) ++ if (applesmc_read_s16(smc, MOTION_SENSOR_X_KEY, &x)) + return; +- if (applesmc_read_s16(MOTION_SENSOR_Y_KEY, &y)) ++ if (applesmc_read_s16(smc, MOTION_SENSOR_Y_KEY, &y)) + return; + + x = -x; +- input_report_abs(idev, ABS_X, x - rest_x); +- input_report_abs(idev, ABS_Y, y - rest_y); ++ input_report_abs(idev, ABS_X, x - smc->rest_x); ++ input_report_abs(idev, ABS_Y, y - smc->rest_y); + input_sync(idev); + } + +@@ -747,16 +808,17 @@ static ssize_t applesmc_name_show(struct device *dev, + static ssize_t applesmc_position_show(struct device *dev, + struct device_attribute *attr, char *buf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + int ret; + s16 x, y, z; + +- ret = applesmc_read_s16(MOTION_SENSOR_X_KEY, &x); ++ ret = applesmc_read_s16(smc, MOTION_SENSOR_X_KEY, &x); + if (ret) + goto out; +- ret = applesmc_read_s16(MOTION_SENSOR_Y_KEY, &y); ++ ret = applesmc_read_s16(smc, MOTION_SENSOR_Y_KEY, &y); + if (ret) + goto out; +- ret = applesmc_read_s16(MOTION_SENSOR_Z_KEY, &z); ++ ret = applesmc_read_s16(smc, MOTION_SENSOR_Z_KEY, &z); + if (ret) + goto out; + +@@ -770,6 +832,7 @@ static ssize_t applesmc_position_show(struct device *dev, + static ssize_t applesmc_light_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + const struct applesmc_entry *entry; + static int data_length; + int ret; +@@ 
-777,7 +840,7 @@ static ssize_t applesmc_light_show(struct device *dev, + u8 buffer[10]; + + if (!data_length) { +- entry = applesmc_get_entry_by_key(LIGHT_SENSOR_LEFT_KEY); ++ entry = applesmc_get_entry_by_key(smc, LIGHT_SENSOR_LEFT_KEY); + if (IS_ERR(entry)) + return PTR_ERR(entry); + if (entry->len > 10) +@@ -786,7 +849,7 @@ static ssize_t applesmc_light_show(struct device *dev, + pr_info("light sensor data length set to %d\n", data_length); + } + +- ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length); ++ ret = applesmc_read_key(smc, LIGHT_SENSOR_LEFT_KEY, buffer, data_length); + if (ret) + goto out; + /* newer macbooks report a single 10-bit bigendian value */ +@@ -796,7 +859,7 @@ static ssize_t applesmc_light_show(struct device *dev, + } + left = buffer[2]; + +- ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); ++ ret = applesmc_read_key(smc, LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); + if (ret) + goto out; + right = buffer[2]; +@@ -812,7 +875,8 @@ static ssize_t applesmc_light_show(struct device *dev, + static ssize_t applesmc_show_sensor_label(struct device *dev, + struct device_attribute *devattr, char *sysfsbuf) + { +- const char *key = smcreg.index[to_index(devattr)]; ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ const char *key = smc->reg.index[to_index(devattr)]; + + return sysfs_emit(sysfsbuf, "%s\n", key); + } +@@ -821,12 +885,13 @@ static ssize_t applesmc_show_sensor_label(struct device *dev, + static ssize_t applesmc_show_temperature(struct device *dev, + struct device_attribute *devattr, char *sysfsbuf) + { +- const char *key = smcreg.index[to_index(devattr)]; ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ const char *key = smc->reg.index[to_index(devattr)]; + int ret; + s16 value; + int temp; + +- ret = applesmc_read_s16(key, &value); ++ ret = applesmc_read_s16(smc, key, &value); + if (ret) + return ret; + +@@ -838,6 +903,7 @@ static ssize_t applesmc_show_temperature(struct device *dev, 
+ static ssize_t applesmc_show_fan_speed(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + int ret; + unsigned int speed = 0; + char newkey[5]; +@@ -846,7 +912,7 @@ static ssize_t applesmc_show_fan_speed(struct device *dev, + scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)], + to_index(attr)); + +- ret = applesmc_read_key(newkey, buffer, 2); ++ ret = applesmc_read_key(smc, newkey, buffer, 2); + if (ret) + return ret; + +@@ -858,6 +924,7 @@ static ssize_t applesmc_store_fan_speed(struct device *dev, + struct device_attribute *attr, + const char *sysfsbuf, size_t count) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + int ret; + unsigned long speed; + char newkey[5]; +@@ -871,7 +938,7 @@ static ssize_t applesmc_store_fan_speed(struct device *dev, + + buffer[0] = (speed >> 6) & 0xff; + buffer[1] = (speed << 2) & 0xff; +- ret = applesmc_write_key(newkey, buffer, 2); ++ ret = applesmc_write_key(smc, newkey, buffer, 2); + + if (ret) + return ret; +@@ -882,11 +949,12 @@ static ssize_t applesmc_store_fan_speed(struct device *dev, + static ssize_t applesmc_show_fan_manual(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + int ret; + u16 manual = 0; + u8 buffer[2]; + +- ret = applesmc_read_key(FANS_MANUAL, buffer, 2); ++ ret = applesmc_read_key(smc, FANS_MANUAL, buffer, 2); + if (ret) + return ret; + +@@ -898,6 +966,7 @@ static ssize_t applesmc_store_fan_manual(struct device *dev, + struct device_attribute *attr, + const char *sysfsbuf, size_t count) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + int ret; + u8 buffer[2]; + unsigned long input; +@@ -906,7 +975,7 @@ static ssize_t applesmc_store_fan_manual(struct device *dev, + if (kstrtoul(sysfsbuf, 10, &input) < 0) + return -EINVAL; + +- ret = applesmc_read_key(FANS_MANUAL, buffer, 2); ++ ret = applesmc_read_key(smc, 
FANS_MANUAL, buffer, 2); + if (ret) + goto out; + +@@ -920,7 +989,7 @@ static ssize_t applesmc_store_fan_manual(struct device *dev, + buffer[0] = (val >> 8) & 0xFF; + buffer[1] = val & 0xFF; + +- ret = applesmc_write_key(FANS_MANUAL, buffer, 2); ++ ret = applesmc_write_key(smc, FANS_MANUAL, buffer, 2); + + out: + if (ret) +@@ -932,13 +1001,14 @@ static ssize_t applesmc_store_fan_manual(struct device *dev, + static ssize_t applesmc_show_fan_position(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + int ret; + char newkey[5]; + u8 buffer[17]; + + scnprintf(newkey, sizeof(newkey), FAN_ID_FMT, to_index(attr)); + +- ret = applesmc_read_key(newkey, buffer, 16); ++ ret = applesmc_read_key(smc, newkey, buffer, 16); + buffer[16] = 0; + + if (ret) +@@ -950,30 +1020,36 @@ static ssize_t applesmc_show_fan_position(struct device *dev, + static ssize_t applesmc_calibrate_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { +- return sysfs_emit(sysfsbuf, "(%d,%d)\n", rest_x, rest_y); ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ ++ return sysfs_emit(sysfsbuf, "(%d,%d)\n", smc->rest_x, smc->rest_y); + } + + static ssize_t applesmc_calibrate_store(struct device *dev, + struct device_attribute *attr, const char *sysfsbuf, size_t count) + { +- applesmc_calibrate(); ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ ++ applesmc_calibrate(smc); + + return count; + } + + static void applesmc_backlight_set(struct work_struct *work) + { +- applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2); ++ struct applesmc_device *smc = container_of(work, struct applesmc_device, backlight_work); ++ ++ applesmc_write_key(smc, BACKLIGHT_KEY, smc->backlight_state, 2); + } +-static DECLARE_WORK(backlight_work, &applesmc_backlight_set); + + static void applesmc_brightness_set(struct led_classdev *led_cdev, + enum led_brightness value) + { ++ struct applesmc_device *smc = 
dev_get_drvdata(led_cdev->dev); + int ret; + +- backlight_state[0] = value; +- ret = queue_work(applesmc_led_wq, &backlight_work); ++ smc->backlight_state[0] = value; ++ ret = queue_work(smc->backlight_wq, &smc->backlight_work); + + if (debug && (!ret)) + dev_dbg(led_cdev->dev, "work was already on the queue.\n"); +@@ -982,11 +1058,12 @@ static void applesmc_brightness_set(struct led_classdev *led_cdev, + static ssize_t applesmc_key_count_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + int ret; + u8 buffer[4]; + u32 count; + +- ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4); ++ ret = applesmc_read_key(smc, KEY_COUNT_KEY, buffer, 4); + if (ret) + return ret; + +@@ -998,13 +1075,14 @@ static ssize_t applesmc_key_count_show(struct device *dev, + static ssize_t applesmc_key_at_index_read_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + const struct applesmc_entry *entry; + int ret; + +- entry = applesmc_get_entry_by_index(key_at_index); ++ entry = applesmc_get_entry_by_index(smc, smc->key_at_index); + if (IS_ERR(entry)) + return PTR_ERR(entry); +- ret = applesmc_read_entry(entry, sysfsbuf, entry->len); ++ ret = applesmc_read_entry(smc, entry, sysfsbuf, entry->len); + if (ret) + return ret; + +@@ -1014,9 +1092,10 @@ static ssize_t applesmc_key_at_index_read_show(struct device *dev, + static ssize_t applesmc_key_at_index_data_length_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + const struct applesmc_entry *entry; + +- entry = applesmc_get_entry_by_index(key_at_index); ++ entry = applesmc_get_entry_by_index(smc, smc->key_at_index); + if (IS_ERR(entry)) + return PTR_ERR(entry); + +@@ -1026,9 +1105,10 @@ static ssize_t applesmc_key_at_index_data_length_show(struct device *dev, + static ssize_t 
applesmc_key_at_index_type_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + const struct applesmc_entry *entry; + +- entry = applesmc_get_entry_by_index(key_at_index); ++ entry = applesmc_get_entry_by_index(smc, smc->key_at_index); + if (IS_ERR(entry)) + return PTR_ERR(entry); + +@@ -1038,9 +1118,10 @@ static ssize_t applesmc_key_at_index_type_show(struct device *dev, + static ssize_t applesmc_key_at_index_name_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + const struct applesmc_entry *entry; + +- entry = applesmc_get_entry_by_index(key_at_index); ++ entry = applesmc_get_entry_by_index(smc, smc->key_at_index); + if (IS_ERR(entry)) + return PTR_ERR(entry); + +@@ -1050,28 +1131,25 @@ static ssize_t applesmc_key_at_index_name_show(struct device *dev, + static ssize_t applesmc_key_at_index_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { +- return sysfs_emit(sysfsbuf, "%d\n", key_at_index); ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ ++ return sysfs_emit(sysfsbuf, "%d\n", smc->key_at_index); + } + + static ssize_t applesmc_key_at_index_store(struct device *dev, + struct device_attribute *attr, const char *sysfsbuf, size_t count) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + unsigned long newkey; + + if (kstrtoul(sysfsbuf, 10, &newkey) < 0 +- || newkey >= smcreg.key_count) ++ || newkey >= smc->reg.key_count) + return -EINVAL; + +- key_at_index = newkey; ++ smc->key_at_index = newkey; + return count; + } + +-static struct led_classdev applesmc_backlight = { +- .name = "smc::kbd_backlight", +- .default_trigger = "nand-disk", +- .brightness_set = applesmc_brightness_set, +-}; +- + static struct applesmc_node_group info_group[] = { + { "name", applesmc_name_show }, + { "key_count", applesmc_key_count_show }, +@@ -1116,14 +1194,15 @@ static struct 
applesmc_node_group temp_group[] = { + /* + * applesmc_destroy_nodes - remove files and free associated memory + */ +-static void applesmc_destroy_nodes(struct applesmc_node_group *groups) ++static void applesmc_destroy_nodes(struct applesmc_device *smc, ++ struct applesmc_node_group *groups) + { + struct applesmc_node_group *grp; + struct applesmc_dev_attr *node; + + for (grp = groups; grp->nodes; grp++) { + for (node = grp->nodes; node->sda.dev_attr.attr.name; node++) +- sysfs_remove_file(&pdev->dev.kobj, ++ sysfs_remove_file(&smc->dev->dev.kobj, + &node->sda.dev_attr.attr); + kfree(grp->nodes); + grp->nodes = NULL; +@@ -1133,7 +1212,8 @@ static void applesmc_destroy_nodes(struct applesmc_node_group *groups) + /* + * applesmc_create_nodes - create a two-dimensional group of sysfs files + */ +-static int applesmc_create_nodes(struct applesmc_node_group *groups, int num) ++static int applesmc_create_nodes(struct applesmc_device *smc, ++ struct applesmc_node_group *groups, int num) + { + struct applesmc_node_group *grp; + struct applesmc_dev_attr *node; +@@ -1157,7 +1237,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num) + sysfs_attr_init(attr); + attr->name = node->name; + attr->mode = 0444 | (grp->store ? 
0200 : 0); +- ret = sysfs_create_file(&pdev->dev.kobj, attr); ++ ret = sysfs_create_file(&smc->dev->dev.kobj, attr); + if (ret) { + attr->name = NULL; + goto out; +@@ -1167,57 +1247,57 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num) + + return 0; + out: +- applesmc_destroy_nodes(groups); ++ applesmc_destroy_nodes(smc, groups); + return ret; + } + + /* Create accelerometer resources */ +-static int applesmc_create_accelerometer(void) ++static int applesmc_create_accelerometer(struct applesmc_device *smc) + { + int ret; + +- if (!smcreg.has_accelerometer) ++ if (!smc->reg.has_accelerometer) + return 0; + +- ret = applesmc_create_nodes(accelerometer_group, 1); ++ ret = applesmc_create_nodes(smc, accelerometer_group, 1); + if (ret) + goto out; + +- applesmc_idev = input_allocate_device(); +- if (!applesmc_idev) { ++ smc->idev = input_allocate_device(); ++ if (!smc->idev) { + ret = -ENOMEM; + goto out_sysfs; + } + + /* initial calibrate for the input device */ +- applesmc_calibrate(); ++ applesmc_calibrate(smc); + + /* initialize the input device */ +- applesmc_idev->name = "applesmc"; +- applesmc_idev->id.bustype = BUS_HOST; +- applesmc_idev->dev.parent = &pdev->dev; +- input_set_abs_params(applesmc_idev, ABS_X, ++ smc->idev->name = "applesmc"; ++ smc->idev->id.bustype = BUS_HOST; ++ smc->idev->dev.parent = &smc->dev->dev; ++ input_set_abs_params(smc->idev, ABS_X, + -256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT); +- input_set_abs_params(applesmc_idev, ABS_Y, ++ input_set_abs_params(smc->idev, ABS_Y, + -256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT); + +- ret = input_setup_polling(applesmc_idev, applesmc_idev_poll); ++ ret = input_setup_polling(smc->idev, applesmc_idev_poll); + if (ret) + goto out_idev; + +- input_set_poll_interval(applesmc_idev, APPLESMC_POLL_INTERVAL); ++ input_set_poll_interval(smc->idev, APPLESMC_POLL_INTERVAL); + +- ret = input_register_device(applesmc_idev); ++ ret = input_register_device(smc->idev); + 
if (ret) + goto out_idev; + + return 0; + + out_idev: +- input_free_device(applesmc_idev); ++ input_free_device(smc->idev); + + out_sysfs: +- applesmc_destroy_nodes(accelerometer_group); ++ applesmc_destroy_nodes(smc, accelerometer_group); + + out: + pr_warn("driver init failed (ret=%d)!\n", ret); +@@ -1225,44 +1305,55 @@ static int applesmc_create_accelerometer(void) + } + + /* Release all resources used by the accelerometer */ +-static void applesmc_release_accelerometer(void) ++static void applesmc_release_accelerometer(struct applesmc_device *smc) + { +- if (!smcreg.has_accelerometer) ++ if (!smc->reg.has_accelerometer) + return; +- input_unregister_device(applesmc_idev); +- applesmc_destroy_nodes(accelerometer_group); ++ input_unregister_device(smc->idev); ++ applesmc_destroy_nodes(smc, accelerometer_group); + } + +-static int applesmc_create_light_sensor(void) ++static int applesmc_create_light_sensor(struct applesmc_device *smc) + { +- if (!smcreg.num_light_sensors) ++ if (!smc->reg.num_light_sensors) + return 0; +- return applesmc_create_nodes(light_sensor_group, 1); ++ return applesmc_create_nodes(smc, light_sensor_group, 1); + } + +-static void applesmc_release_light_sensor(void) ++static void applesmc_release_light_sensor(struct applesmc_device *smc) + { +- if (!smcreg.num_light_sensors) ++ if (!smc->reg.num_light_sensors) + return; +- applesmc_destroy_nodes(light_sensor_group); ++ applesmc_destroy_nodes(smc, light_sensor_group); + } + +-static int applesmc_create_key_backlight(void) ++static int applesmc_create_key_backlight(struct applesmc_device *smc) + { +- if (!smcreg.has_key_backlight) ++ int ret; ++ ++ if (!smc->reg.has_key_backlight) + return 0; +- applesmc_led_wq = create_singlethread_workqueue("applesmc-led"); +- if (!applesmc_led_wq) ++ smc->backlight_wq = create_singlethread_workqueue("applesmc-led"); ++ if (!smc->backlight_wq) + return -ENOMEM; +- return led_classdev_register(&pdev->dev, &applesmc_backlight); ++ ++ 
INIT_WORK(&smc->backlight_work, applesmc_backlight_set); ++ smc->backlight_dev.name = "smc::kbd_backlight"; ++ smc->backlight_dev.default_trigger = "nand-disk"; ++ smc->backlight_dev.brightness_set = applesmc_brightness_set; ++ ret = led_classdev_register(&smc->dev->dev, &smc->backlight_dev); ++ if (ret) ++ destroy_workqueue(smc->backlight_wq); ++ ++ return ret; + } + +-static void applesmc_release_key_backlight(void) ++static void applesmc_release_key_backlight(struct applesmc_device *smc) + { +- if (!smcreg.has_key_backlight) ++ if (!smc->reg.has_key_backlight) + return; +- led_classdev_unregister(&applesmc_backlight); +- destroy_workqueue(applesmc_led_wq); ++ led_classdev_unregister(&smc->backlight_dev); ++ destroy_workqueue(smc->backlight_wq); + } + + static int applesmc_dmi_match(const struct dmi_system_id *id) +@@ -1306,86 +1397,100 @@ static const struct dmi_system_id applesmc_whitelist[] __initconst = { + { .ident = NULL } + }; + +-static int __init applesmc_init(void) ++static int applesmc_create_modules(struct applesmc_device *smc) + { + int ret; + +- if (!dmi_check_system(applesmc_whitelist)) { +- pr_warn("supported laptop not found!\n"); +- ret = -ENODEV; +- goto out; +- } +- +- if (!request_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS, +- "applesmc")) { +- ret = -ENXIO; +- goto out; +- } +- +- ret = platform_driver_register(&applesmc_driver); +- if (ret) +- goto out_region; +- +- pdev = platform_device_register_simple("applesmc", APPLESMC_DATA_PORT, +- NULL, 0); +- if (IS_ERR(pdev)) { +- ret = PTR_ERR(pdev); +- goto out_driver; +- } +- +- /* create register cache */ +- ret = applesmc_init_smcreg(); +- if (ret) +- goto out_device; +- +- ret = applesmc_create_nodes(info_group, 1); ++ ret = applesmc_create_nodes(smc, info_group, 1); + if (ret) +- goto out_smcreg; ++ goto out; + +- ret = applesmc_create_nodes(fan_group, smcreg.fan_count); ++ ret = applesmc_create_nodes(smc, fan_group, smc->reg.fan_count); + if (ret) + goto out_info; + +- ret = 
applesmc_create_nodes(temp_group, smcreg.index_count); ++ ret = applesmc_create_nodes(smc, temp_group, smc->reg.index_count); + if (ret) + goto out_fans; + +- ret = applesmc_create_accelerometer(); ++ ret = applesmc_create_accelerometer(smc); + if (ret) + goto out_temperature; + +- ret = applesmc_create_light_sensor(); ++ ret = applesmc_create_light_sensor(smc); + if (ret) + goto out_accelerometer; + +- ret = applesmc_create_key_backlight(); ++ ret = applesmc_create_key_backlight(smc); + if (ret) + goto out_light_sysfs; + +- hwmon_dev = hwmon_device_register(&pdev->dev); +- if (IS_ERR(hwmon_dev)) { +- ret = PTR_ERR(hwmon_dev); ++ smc->hwmon_dev = hwmon_device_register(&smc->dev->dev); ++ if (IS_ERR(smc->hwmon_dev)) { ++ ret = PTR_ERR(smc->hwmon_dev); + goto out_light_ledclass; + } + + return 0; + + out_light_ledclass: +- applesmc_release_key_backlight(); ++ applesmc_release_key_backlight(smc); + out_light_sysfs: +- applesmc_release_light_sensor(); ++ applesmc_release_light_sensor(smc); + out_accelerometer: +- applesmc_release_accelerometer(); ++ applesmc_release_accelerometer(smc); + out_temperature: +- applesmc_destroy_nodes(temp_group); ++ applesmc_destroy_nodes(smc, temp_group); + out_fans: +- applesmc_destroy_nodes(fan_group); ++ applesmc_destroy_nodes(smc, fan_group); + out_info: +- applesmc_destroy_nodes(info_group); +-out_smcreg: +- applesmc_destroy_smcreg(); +-out_device: +- platform_device_unregister(pdev); ++ applesmc_destroy_nodes(smc, info_group); ++out: ++ return ret; ++} ++ ++static void applesmc_destroy_modules(struct applesmc_device *smc) ++{ ++ hwmon_device_unregister(smc->hwmon_dev); ++ applesmc_release_key_backlight(smc); ++ applesmc_release_light_sensor(smc); ++ applesmc_release_accelerometer(smc); ++ applesmc_destroy_nodes(smc, temp_group); ++ applesmc_destroy_nodes(smc, fan_group); ++ applesmc_destroy_nodes(smc, info_group); ++} ++ ++static struct platform_device *pdev; ++ ++static int __init applesmc_init(void) ++{ ++ int ret; ++ ++ if 
(!dmi_check_system(applesmc_whitelist)) { ++ pr_warn("supported laptop not found!\n"); ++ ret = -ENODEV; ++ goto out; ++ } ++ ++ if (!request_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS, ++ "applesmc")) { ++ ret = -ENXIO; ++ goto out; ++ } ++ ++ ret = platform_driver_register(&applesmc_driver); ++ if (ret) ++ goto out_region; ++ ++ pdev = platform_device_register_simple("applesmc", APPLESMC_DATA_PORT, ++ NULL, 0); ++ if (IS_ERR(pdev)) { ++ ret = PTR_ERR(pdev); ++ goto out_driver; ++ } ++ ++ return 0; ++ + out_driver: + platform_driver_unregister(&applesmc_driver); + out_region: +@@ -1397,14 +1502,6 @@ static int __init applesmc_init(void) + + static void __exit applesmc_exit(void) + { +- hwmon_device_unregister(hwmon_dev); +- applesmc_release_key_backlight(); +- applesmc_release_light_sensor(); +- applesmc_release_accelerometer(); +- applesmc_destroy_nodes(temp_group); +- applesmc_destroy_nodes(fan_group); +- applesmc_destroy_nodes(info_group); +- applesmc_destroy_smcreg(); + platform_device_unregister(pdev); + platform_driver_unregister(&applesmc_driver); + release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS); +@@ -1414,6 +1511,7 @@ module_init(applesmc_init); + module_exit(applesmc_exit); + + MODULE_AUTHOR("Nicolas Boichat"); ++MODULE_AUTHOR("Paul Pawlowski"); + MODULE_DESCRIPTION("Apple SMC"); + MODULE_LICENSE("GPL v2"); + MODULE_DEVICE_TABLE(dmi, applesmc_whitelist); +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/3002-applesmc-make-io-port-base-addr-dynamic.patch b/patch/kernel/archive/uefi-x86-6.19/3002-applesmc-make-io-port-base-addr-dynamic.patch new file mode 100644 index 000000000000..c6640a593c87 --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/3002-applesmc-make-io-port-base-addr-dynamic.patch @@ -0,0 +1,312 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Paul Pawlowski +Date: Sun, 17 Nov 2019 23:11:56 +0100 +Subject: applesmc: make io port base addr dynamic + +This change makes the port base 
runtime configurable. +The reason why this change is made is so that when we switch to an +acpi_device we can resolve the port base addr from ACPI. + +This change is not strictly required for T2 support - the base +address is still 0x300 on T2 Macs. + +Signed-off-by: Aun-Ali Zaidi +--- + drivers/hwmon/applesmc.c | 91 +++++----- + 1 file changed, 49 insertions(+), 42 deletions(-) + +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c +index 111111111111..222222222222 100644 +--- a/drivers/hwmon/applesmc.c ++++ b/drivers/hwmon/applesmc.c +@@ -35,10 +35,11 @@ + #include + #include + ++#define APPLESMC_PORT_BASE 0x300 + /* data port used by Apple SMC */ +-#define APPLESMC_DATA_PORT 0x300 ++#define APPLESMC_DATA_PORT 0 + /* command/status port used by Apple SMC */ +-#define APPLESMC_CMD_PORT 0x304 ++#define APPLESMC_CMD_PORT 4 + + #define APPLESMC_NR_PORTS 32 /* 0x300-0x31f */ + +@@ -140,6 +141,8 @@ struct applesmc_device { + struct platform_device *dev; + struct applesmc_registers reg; + ++ u16 port_base; ++ + s16 rest_x; + s16 rest_y; + +@@ -169,7 +172,7 @@ static const int debug; + * run out past 500ms. + */ + +-static int wait_status(u8 val, u8 mask) ++static int wait_status(struct applesmc_device *smc, u8 val, u8 mask) + { + u8 status; + int us; +@@ -177,7 +180,7 @@ static int wait_status(u8 val, u8 mask) + + us = APPLESMC_MIN_WAIT; + for (i = 0; i < 24 ; i++) { +- status = inb(APPLESMC_CMD_PORT); ++ status = inb(smc->port_base + APPLESMC_CMD_PORT); + if ((status & mask) == val) + return 0; + usleep_range(us, us * 2); +@@ -189,11 +192,11 @@ static int wait_status(u8 val, u8 mask) + + /* send_byte - Write to SMC data port. Callers must hold applesmc_lock. 
*/ + +-static int send_byte(u8 cmd, u16 port) ++static int send_byte(struct applesmc_device *smc, u8 cmd, u16 port) + { + int status; + +- status = wait_status(0, SMC_STATUS_IB_CLOSED); ++ status = wait_status(smc, 0, SMC_STATUS_IB_CLOSED); + if (status) + return status; + /* +@@ -202,24 +205,24 @@ static int send_byte(u8 cmd, u16 port) + * this extra read may not happen if status returns both + * simultaneously and this would appear to be required. + */ +- status = wait_status(SMC_STATUS_BUSY, SMC_STATUS_BUSY); ++ status = wait_status(smc, SMC_STATUS_BUSY, SMC_STATUS_BUSY); + if (status) + return status; + +- outb(cmd, port); ++ outb(cmd, smc->port_base + port); + return 0; + } + + /* send_command - Write a command to the SMC. Callers must hold applesmc_lock. */ + +-static int send_command(u8 cmd) ++static int send_command(struct applesmc_device *smc, u8 cmd) + { + int ret; + +- ret = wait_status(0, SMC_STATUS_IB_CLOSED); ++ ret = wait_status(smc, 0, SMC_STATUS_IB_CLOSED); + if (ret) + return ret; +- outb(cmd, APPLESMC_CMD_PORT); ++ outb(cmd, smc->port_base + APPLESMC_CMD_PORT); + return 0; + } + +@@ -229,108 +232,112 @@ static int send_command(u8 cmd) + * If busy is stuck high after the command then the SMC is jammed. 
+ */ + +-static int smc_sane(void) ++static int smc_sane(struct applesmc_device *smc) + { + int ret; + +- ret = wait_status(0, SMC_STATUS_BUSY); ++ ret = wait_status(smc, 0, SMC_STATUS_BUSY); + if (!ret) + return ret; +- ret = send_command(APPLESMC_READ_CMD); ++ ret = send_command(smc, APPLESMC_READ_CMD); + if (ret) + return ret; +- return wait_status(0, SMC_STATUS_BUSY); ++ return wait_status(smc, 0, SMC_STATUS_BUSY); + } + +-static int send_argument(const char *key) ++static int send_argument(struct applesmc_device *smc, const char *key) + { + int i; + + for (i = 0; i < 4; i++) +- if (send_byte(key[i], APPLESMC_DATA_PORT)) ++ if (send_byte(smc, key[i], APPLESMC_DATA_PORT)) + return -EIO; + return 0; + } + +-static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) ++static int read_smc(struct applesmc_device *smc, u8 cmd, const char *key, ++ u8 *buffer, u8 len) + { + u8 status, data = 0; + int i; + int ret; + +- ret = smc_sane(); ++ ret = smc_sane(smc); + if (ret) + return ret; + +- if (send_command(cmd) || send_argument(key)) { ++ if (send_command(smc, cmd) || send_argument(smc, key)) { + pr_warn("%.4s: read arg fail\n", key); + return -EIO; + } + + /* This has no effect on newer (2012) SMCs */ +- if (send_byte(len, APPLESMC_DATA_PORT)) { ++ if (send_byte(smc, len, APPLESMC_DATA_PORT)) { + pr_warn("%.4s: read len fail\n", key); + return -EIO; + } + + for (i = 0; i < len; i++) { +- if (wait_status(SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY, ++ if (wait_status(smc, ++ SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY, + SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY)) { + pr_warn("%.4s: read data[%d] fail\n", key, i); + return -EIO; + } +- buffer[i] = inb(APPLESMC_DATA_PORT); ++ buffer[i] = inb(smc->port_base + APPLESMC_DATA_PORT); + } + + /* Read the data port until bit0 is cleared */ + for (i = 0; i < 16; i++) { + udelay(APPLESMC_MIN_WAIT); +- status = inb(APPLESMC_CMD_PORT); ++ status = inb(smc->port_base + APPLESMC_CMD_PORT); + if (!(status & 
SMC_STATUS_AWAITING_DATA)) + break; +- data = inb(APPLESMC_DATA_PORT); ++ data = inb(smc->port_base + APPLESMC_DATA_PORT); + } + if (i) + pr_warn("flushed %d bytes, last value is: %d\n", i, data); + +- return wait_status(0, SMC_STATUS_BUSY); ++ return wait_status(smc, 0, SMC_STATUS_BUSY); + } + +-static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len) ++static int write_smc(struct applesmc_device *smc, u8 cmd, const char *key, ++ const u8 *buffer, u8 len) + { + int i; + int ret; + +- ret = smc_sane(); ++ ret = smc_sane(smc); + if (ret) + return ret; + +- if (send_command(cmd) || send_argument(key)) { ++ if (send_command(smc, cmd) || send_argument(smc, key)) { + pr_warn("%s: write arg fail\n", key); + return -EIO; + } + +- if (send_byte(len, APPLESMC_DATA_PORT)) { ++ if (send_byte(smc, len, APPLESMC_DATA_PORT)) { + pr_warn("%.4s: write len fail\n", key); + return -EIO; + } + + for (i = 0; i < len; i++) { +- if (send_byte(buffer[i], APPLESMC_DATA_PORT)) { ++ if (send_byte(smc, buffer[i], APPLESMC_DATA_PORT)) { + pr_warn("%s: write data fail\n", key); + return -EIO; + } + } + +- return wait_status(0, SMC_STATUS_BUSY); ++ return wait_status(smc, 0, SMC_STATUS_BUSY); + } + +-static int read_register_count(unsigned int *count) ++static int read_register_count(struct applesmc_device *smc, ++ unsigned int *count) + { + __be32 be; + int ret; + +- ret = read_smc(APPLESMC_READ_CMD, KEY_COUNT_KEY, (u8 *)&be, 4); ++ ret = read_smc(smc, APPLESMC_READ_CMD, KEY_COUNT_KEY, (u8 *)&be, 4); + if (ret) + return ret; + +@@ -353,7 +360,7 @@ static int applesmc_read_entry(struct applesmc_device *smc, + if (entry->len != len) + return -EINVAL; + mutex_lock(&smc->reg.mutex); +- ret = read_smc(APPLESMC_READ_CMD, entry->key, buf, len); ++ ret = read_smc(smc, APPLESMC_READ_CMD, entry->key, buf, len); + mutex_unlock(&smc->reg.mutex); + + return ret; +@@ -367,7 +374,7 @@ static int applesmc_write_entry(struct applesmc_device *smc, + if (entry->len != len) + return -EINVAL; + 
mutex_lock(&smc->reg.mutex); +- ret = write_smc(APPLESMC_WRITE_CMD, entry->key, buf, len); ++ ret = write_smc(smc, APPLESMC_WRITE_CMD, entry->key, buf, len); + mutex_unlock(&smc->reg.mutex); + return ret; + } +@@ -388,10 +395,10 @@ static const struct applesmc_entry *applesmc_get_entry_by_index( + if (cache->valid) + goto out; + be = cpu_to_be32(index); +- ret = read_smc(APPLESMC_GET_KEY_BY_INDEX_CMD, (u8 *)&be, key, 4); ++ ret = read_smc(smc, APPLESMC_GET_KEY_BY_INDEX_CMD, (u8 *)&be, key, 4); + if (ret) + goto out; +- ret = read_smc(APPLESMC_GET_KEY_TYPE_CMD, key, info, 6); ++ ret = read_smc(smc, APPLESMC_GET_KEY_TYPE_CMD, key, info, 6); + if (ret) + goto out; + +@@ -589,7 +596,7 @@ static int applesmc_init_smcreg_try(struct applesmc_device *smc) + if (s->init_complete) + return 0; + +- ret = read_register_count(&count); ++ ret = read_register_count(smc, &count); + if (ret) + return ret; + +@@ -1472,7 +1479,7 @@ static int __init applesmc_init(void) + goto out; + } + +- if (!request_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS, ++ if (!request_region(APPLESMC_PORT_BASE, APPLESMC_NR_PORTS, + "applesmc")) { + ret = -ENXIO; + goto out; +@@ -1494,7 +1501,7 @@ static int __init applesmc_init(void) + out_driver: + platform_driver_unregister(&applesmc_driver); + out_region: +- release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS); ++ release_region(APPLESMC_PORT_BASE, APPLESMC_NR_PORTS); + out: + pr_warn("driver init failed (ret=%d)!\n", ret); + return ret; +@@ -1504,7 +1511,7 @@ static void __exit applesmc_exit(void) + { + platform_device_unregister(pdev); + platform_driver_unregister(&applesmc_driver); +- release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS); ++ release_region(APPLESMC_PORT_BASE, APPLESMC_NR_PORTS); + } + + module_init(applesmc_init); +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/3003-applesmc-switch-to-acpi_device-from-platform.patch b/patch/kernel/archive/uefi-x86-6.19/3003-applesmc-switch-to-acpi_device-from-platform.patch new file 
mode 100644 index 000000000000..21b04a5a50aa --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/3003-applesmc-switch-to-acpi_device-from-platform.patch @@ -0,0 +1,265 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Paul Pawlowski +Date: Sat, 29 Jun 2024 04:49:16 +0000 +Subject: applesmc: switch to acpi_device (from platform) + +This change makes the change from platform_device +to acpi_device. The rationale for this change is +that on T2 Macs, an additional FixedMemory32 +region is needed for device operation, and it can +be easily resolved via ACPI tables (this will be +done in another commit). + +Additionally, on older Macs, the OS X driver also +looks for the specified ACPI device to resolve +its memory regions, and therefore this change +should not result in any incompatibilities. + +Signed-off-by: Aun-Ali Zaidi +--- + drivers/hwmon/applesmc.c | 124 +++++++--- + 1 file changed, 84 insertions(+), 40 deletions(-) + +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c +index 111111111111..222222222222 100644 +--- a/drivers/hwmon/applesmc.c ++++ b/drivers/hwmon/applesmc.c +@@ -19,7 +19,7 @@ + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + + #include +-#include ++#include + #include + #include + #include +@@ -35,7 +35,6 @@ + #include + #include + +-#define APPLESMC_PORT_BASE 0x300 + /* data port used by Apple SMC */ + #define APPLESMC_DATA_PORT 0 + /* command/status port used by Apple SMC */ +@@ -138,9 +137,10 @@ struct applesmc_registers { + }; + + struct applesmc_device { +- struct platform_device *dev; ++ struct acpi_device *dev; + struct applesmc_registers reg; + ++ bool port_base_set; + u16 port_base; + + s16 rest_x; +@@ -692,9 +692,13 @@ static int applesmc_init_smcreg(struct applesmc_device *smc) + } + + /* Device model stuff */ ++ ++static int applesmc_init_resources(struct applesmc_device *smc); ++static void applesmc_free_resources(struct applesmc_device *smc); + static int applesmc_create_modules(struct 
applesmc_device *smc); + static void applesmc_destroy_modules(struct applesmc_device *smc); +-static int applesmc_probe(struct platform_device *dev) ++ ++static int applesmc_add(struct acpi_device *dev) + { + struct applesmc_device *smc; + int ret; +@@ -705,12 +709,16 @@ static int applesmc_probe(struct platform_device *dev) + smc->dev = dev; + mutex_init(&smc->reg.mutex); + +- platform_set_drvdata(dev, smc); ++ dev_set_drvdata(&dev->dev, smc); + +- ret = applesmc_init_smcreg(smc); ++ ret = applesmc_init_resources(smc); + if (ret) + goto out_mem; + ++ ret = applesmc_init_smcreg(smc); ++ if (ret) ++ goto out_res; ++ + applesmc_device_init(smc); + + ret = applesmc_create_modules(smc); +@@ -721,20 +729,23 @@ static int applesmc_probe(struct platform_device *dev) + + out_reg: + applesmc_destroy_smcreg(smc); ++out_res: ++ applesmc_free_resources(smc); + out_mem: +- platform_set_drvdata(dev, NULL); ++ dev_set_drvdata(&dev->dev, NULL); + mutex_destroy(&smc->reg.mutex); + kfree(smc); + + return ret; + } + +-static int applesmc_remove(struct platform_device *dev) ++static int applesmc_remove(struct acpi_device *dev) + { +- struct applesmc_device *smc = platform_get_drvdata(dev); ++ struct applesmc_device *smc = dev_get_drvdata(&dev->dev); + + applesmc_destroy_modules(smc); + applesmc_destroy_smcreg(smc); ++ applesmc_free_resources(smc); + + mutex_destroy(&smc->reg.mutex); + kfree(smc); +@@ -742,6 +753,52 @@ static int applesmc_remove(struct platform_device *dev) + return 0; + } + ++static acpi_status applesmc_walk_resources(struct acpi_resource *res, ++ void *data) ++{ ++ struct applesmc_device *smc = data; ++ ++ switch (res->type) { ++ case ACPI_RESOURCE_TYPE_IO: ++ if (!smc->port_base_set) { ++ if (res->data.io.address_length < APPLESMC_NR_PORTS) ++ return AE_ERROR; ++ smc->port_base = res->data.io.minimum; ++ smc->port_base_set = true; ++ } ++ return AE_OK; ++ ++ case ACPI_RESOURCE_TYPE_END_TAG: ++ if (smc->port_base_set) ++ return AE_OK; ++ else ++ return AE_NOT_FOUND; 
++ ++ default: ++ return AE_OK; ++ } ++} ++ ++static int applesmc_init_resources(struct applesmc_device *smc) ++{ ++ int ret; ++ ++ ret = acpi_walk_resources(smc->dev->handle, METHOD_NAME__CRS, ++ applesmc_walk_resources, smc); ++ if (ACPI_FAILURE(ret)) ++ return -ENXIO; ++ ++ if (!request_region(smc->port_base, APPLESMC_NR_PORTS, "applesmc")) ++ return -ENXIO; ++ ++ return 0; ++} ++ ++static void applesmc_free_resources(struct applesmc_device *smc) ++{ ++ release_region(smc->port_base, APPLESMC_NR_PORTS); ++} ++ + /* Synchronize device with memorized backlight state */ + static int applesmc_pm_resume(struct device *dev) + { +@@ -763,17 +820,26 @@ static int applesmc_pm_restore(struct device *dev) + return applesmc_pm_resume(dev); + } + ++static const struct acpi_device_id applesmc_ids[] = { ++ {"APP0001", 0}, ++ {"", 0}, ++}; ++ + static const struct dev_pm_ops applesmc_pm_ops = { + .resume = applesmc_pm_resume, + .restore = applesmc_pm_restore, + }; + +-static struct platform_driver applesmc_driver = { +- .probe = applesmc_probe, +- .remove = applesmc_remove, +- .driver = { +- .name = "applesmc", +- .pm = &applesmc_pm_ops, ++static struct acpi_driver applesmc_driver = { ++ .name = "applesmc", ++ .class = "applesmc", ++ .ids = applesmc_ids, ++ .ops = { ++ .add = applesmc_add, ++ .remove = applesmc_remove ++ }, ++ .drv = { ++ .pm = &applesmc_pm_ops + }, + }; + +@@ -1262,7 +1328,6 @@ static int applesmc_create_nodes(struct applesmc_device *smc, + static int applesmc_create_accelerometer(struct applesmc_device *smc) + { + int ret; +- + if (!smc->reg.has_accelerometer) + return 0; + +@@ -1467,8 +1532,6 @@ static void applesmc_destroy_modules(struct applesmc_device *smc) + applesmc_destroy_nodes(smc, info_group); + } + +-static struct platform_device *pdev; +- + static int __init applesmc_init(void) + { + int ret; +@@ -1479,29 +1542,12 @@ static int __init applesmc_init(void) + goto out; + } + +- if (!request_region(APPLESMC_PORT_BASE, APPLESMC_NR_PORTS, +- 
"applesmc")) { +- ret = -ENXIO; +- goto out; +- } +- +- ret = platform_driver_register(&applesmc_driver); ++ ret = acpi_bus_register_driver(&applesmc_driver); + if (ret) +- goto out_region; +- +- pdev = platform_device_register_simple("applesmc", APPLESMC_DATA_PORT, +- NULL, 0); +- if (IS_ERR(pdev)) { +- ret = PTR_ERR(pdev); +- goto out_driver; +- } ++ goto out; + + return 0; + +-out_driver: +- platform_driver_unregister(&applesmc_driver); +-out_region: +- release_region(APPLESMC_PORT_BASE, APPLESMC_NR_PORTS); + out: + pr_warn("driver init failed (ret=%d)!\n", ret); + return ret; +@@ -1509,9 +1555,7 @@ static int __init applesmc_init(void) + + static void __exit applesmc_exit(void) + { +- platform_device_unregister(pdev); +- platform_driver_unregister(&applesmc_driver); +- release_region(APPLESMC_PORT_BASE, APPLESMC_NR_PORTS); ++ acpi_bus_unregister_driver(&applesmc_driver); + } + + module_init(applesmc_init); +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/3004-applesmc-key-interface-wrappers.patch b/patch/kernel/archive/uefi-x86-6.19/3004-applesmc-key-interface-wrappers.patch new file mode 100644 index 000000000000..ad7e3105f9d3 --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/3004-applesmc-key-interface-wrappers.patch @@ -0,0 +1,298 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Paul Pawlowski +Date: Sun, 17 Nov 2019 23:12:14 +0100 +Subject: applesmc: key interface wrappers + +This change replaces the read_smc and write_smc +methods with wrappers, additionally removing the +command id parameter from them (and introducing +get_smc_key_by_index and get_smc_key_info). + +This is done as to allow simple implementation +replacement on T2 Macs. The newly introduced +methods mentioned in the previous paragraph need +special handling on T2 and as such had to be +separated. 
+ +Signed-off-by: Aun-Ali Zaidi +--- + drivers/hwmon/applesmc.c | 119 ++++++---- + 1 file changed, 79 insertions(+), 40 deletions(-) + +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c +index 111111111111..222222222222 100644 +--- a/drivers/hwmon/applesmc.c ++++ b/drivers/hwmon/applesmc.c +@@ -172,7 +172,7 @@ static const int debug; + * run out past 500ms. + */ + +-static int wait_status(struct applesmc_device *smc, u8 val, u8 mask) ++static int port_wait_status(struct applesmc_device *smc, u8 val, u8 mask) + { + u8 status; + int us; +@@ -190,13 +190,13 @@ static int wait_status(struct applesmc_device *smc, u8 val, u8 mask) + return -EIO; + } + +-/* send_byte - Write to SMC data port. Callers must hold applesmc_lock. */ ++/* port_send_byte - Write to SMC data port. Callers must hold applesmc_lock. */ + +-static int send_byte(struct applesmc_device *smc, u8 cmd, u16 port) ++static int port_send_byte(struct applesmc_device *smc, u8 cmd, u16 port) + { + int status; + +- status = wait_status(smc, 0, SMC_STATUS_IB_CLOSED); ++ status = port_wait_status(smc, 0, SMC_STATUS_IB_CLOSED); + if (status) + return status; + /* +@@ -205,7 +205,7 @@ static int send_byte(struct applesmc_device *smc, u8 cmd, u16 port) + * this extra read may not happen if status returns both + * simultaneously and this would appear to be required. + */ +- status = wait_status(smc, SMC_STATUS_BUSY, SMC_STATUS_BUSY); ++ status = port_wait_status(smc, SMC_STATUS_BUSY, SMC_STATUS_BUSY); + if (status) + return status; + +@@ -213,15 +213,16 @@ static int send_byte(struct applesmc_device *smc, u8 cmd, u16 port) + return 0; + } + +-/* send_command - Write a command to the SMC. Callers must hold applesmc_lock. */ ++/* port_send_command - Write a command to the SMC. Callers must hold applesmc_lock. 
*/ + +-static int send_command(struct applesmc_device *smc, u8 cmd) ++static int port_send_command(struct applesmc_device *smc, u8 cmd) + { + int ret; + +- ret = wait_status(smc, 0, SMC_STATUS_IB_CLOSED); ++ ret = port_wait_status(smc, 0, SMC_STATUS_IB_CLOSED); + if (ret) + return ret; ++ + outb(cmd, smc->port_base + APPLESMC_CMD_PORT); + return 0; + } +@@ -232,53 +233,53 @@ static int send_command(struct applesmc_device *smc, u8 cmd) + * If busy is stuck high after the command then the SMC is jammed. + */ + +-static int smc_sane(struct applesmc_device *smc) ++static int port_smc_sane(struct applesmc_device *smc) + { + int ret; + +- ret = wait_status(smc, 0, SMC_STATUS_BUSY); ++ ret = port_wait_status(smc, 0, SMC_STATUS_BUSY); + if (!ret) + return ret; +- ret = send_command(smc, APPLESMC_READ_CMD); ++ ret = port_send_command(smc, APPLESMC_READ_CMD); + if (ret) + return ret; +- return wait_status(smc, 0, SMC_STATUS_BUSY); ++ return port_wait_status(smc, 0, SMC_STATUS_BUSY); + } + +-static int send_argument(struct applesmc_device *smc, const char *key) ++static int port_send_argument(struct applesmc_device *smc, const char *key) + { + int i; + + for (i = 0; i < 4; i++) +- if (send_byte(smc, key[i], APPLESMC_DATA_PORT)) ++ if (port_send_byte(smc, key[i], APPLESMC_DATA_PORT)) + return -EIO; + return 0; + } + +-static int read_smc(struct applesmc_device *smc, u8 cmd, const char *key, ++static int port_read_smc(struct applesmc_device *smc, u8 cmd, const char *key, + u8 *buffer, u8 len) + { + u8 status, data = 0; + int i; + int ret; + +- ret = smc_sane(smc); ++ ret = port_smc_sane(smc); + if (ret) + return ret; + +- if (send_command(smc, cmd) || send_argument(smc, key)) { ++ if (port_send_command(smc, cmd) || port_send_argument(smc, key)) { + pr_warn("%.4s: read arg fail\n", key); + return -EIO; + } + + /* This has no effect on newer (2012) SMCs */ +- if (send_byte(smc, len, APPLESMC_DATA_PORT)) { ++ if (port_send_byte(smc, len, APPLESMC_DATA_PORT)) { + pr_warn("%.4s: 
read len fail\n", key); + return -EIO; + } + + for (i = 0; i < len; i++) { +- if (wait_status(smc, ++ if (port_wait_status(smc, + SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY, + SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY)) { + pr_warn("%.4s: read data[%d] fail\n", key, i); +@@ -298,37 +299,80 @@ static int read_smc(struct applesmc_device *smc, u8 cmd, const char *key, + if (i) + pr_warn("flushed %d bytes, last value is: %d\n", i, data); + +- return wait_status(smc, 0, SMC_STATUS_BUSY); ++ return port_wait_status(smc, 0, SMC_STATUS_BUSY); + } + +-static int write_smc(struct applesmc_device *smc, u8 cmd, const char *key, ++static int port_write_smc(struct applesmc_device *smc, u8 cmd, const char *key, + const u8 *buffer, u8 len) + { + int i; + int ret; + +- ret = smc_sane(smc); ++ ret = port_smc_sane(smc); + if (ret) + return ret; + +- if (send_command(smc, cmd) || send_argument(smc, key)) { ++ if (port_send_command(smc, cmd) || port_send_argument(smc, key)) { + pr_warn("%s: write arg fail\n", key); + return -EIO; + } + +- if (send_byte(smc, len, APPLESMC_DATA_PORT)) { ++ if (port_send_byte(smc, len, APPLESMC_DATA_PORT)) { + pr_warn("%.4s: write len fail\n", key); + return -EIO; + } + + for (i = 0; i < len; i++) { +- if (send_byte(smc, buffer[i], APPLESMC_DATA_PORT)) { ++ if (port_send_byte(smc, buffer[i], APPLESMC_DATA_PORT)) { + pr_warn("%s: write data fail\n", key); + return -EIO; + } + } + +- return wait_status(smc, 0, SMC_STATUS_BUSY); ++ return port_wait_status(smc, 0, SMC_STATUS_BUSY); ++} ++ ++static int port_get_smc_key_info(struct applesmc_device *smc, ++ const char *key, struct applesmc_entry *info) ++{ ++ int ret; ++ u8 raw[6]; ++ ++ ret = port_read_smc(smc, APPLESMC_GET_KEY_TYPE_CMD, key, raw, 6); ++ if (ret) ++ return ret; ++ info->len = raw[0]; ++ memcpy(info->type, &raw[1], 4); ++ info->flags = raw[5]; ++ return 0; ++} ++ ++static int read_smc(struct applesmc_device *smc, const char *key, ++ u8 *buffer, u8 len) ++{ ++ return port_read_smc(smc, 
APPLESMC_READ_CMD, key, buffer, len); ++} ++ ++static int write_smc(struct applesmc_device *smc, const char *key, ++ const u8 *buffer, u8 len) ++{ ++ return port_write_smc(smc, APPLESMC_WRITE_CMD, key, buffer, len); ++} ++ ++static int get_smc_key_by_index(struct applesmc_device *smc, ++ unsigned int index, char *key) ++{ ++ __be32 be; ++ ++ be = cpu_to_be32(index); ++ return port_read_smc(smc, APPLESMC_GET_KEY_BY_INDEX_CMD, ++ (const char *) &be, (u8 *) key, 4); ++} ++ ++static int get_smc_key_info(struct applesmc_device *smc, const char *key, ++ struct applesmc_entry *info) ++{ ++ return port_get_smc_key_info(smc, key, info); + } + + static int read_register_count(struct applesmc_device *smc, +@@ -337,8 +381,8 @@ static int read_register_count(struct applesmc_device *smc, + __be32 be; + int ret; + +- ret = read_smc(smc, APPLESMC_READ_CMD, KEY_COUNT_KEY, (u8 *)&be, 4); +- if (ret) ++ ret = read_smc(smc, KEY_COUNT_KEY, (u8 *)&be, 4); ++ if (ret < 0) + return ret; + + *count = be32_to_cpu(be); +@@ -360,7 +404,7 @@ static int applesmc_read_entry(struct applesmc_device *smc, + if (entry->len != len) + return -EINVAL; + mutex_lock(&smc->reg.mutex); +- ret = read_smc(smc, APPLESMC_READ_CMD, entry->key, buf, len); ++ ret = read_smc(smc, entry->key, buf, len); + mutex_unlock(&smc->reg.mutex); + + return ret; +@@ -374,7 +418,7 @@ static int applesmc_write_entry(struct applesmc_device *smc, + if (entry->len != len) + return -EINVAL; + mutex_lock(&smc->reg.mutex); +- ret = write_smc(smc, APPLESMC_WRITE_CMD, entry->key, buf, len); ++ ret = write_smc(smc, entry->key, buf, len); + mutex_unlock(&smc->reg.mutex); + return ret; + } +@@ -383,8 +427,7 @@ static const struct applesmc_entry *applesmc_get_entry_by_index( + struct applesmc_device *smc, int index) + { + struct applesmc_entry *cache = &smc->reg.cache[index]; +- u8 key[4], info[6]; +- __be32 be; ++ char key[4]; + int ret = 0; + + if (cache->valid) +@@ -394,18 +437,14 @@ static const struct applesmc_entry 
*applesmc_get_entry_by_index( + + if (cache->valid) + goto out; +- be = cpu_to_be32(index); +- ret = read_smc(smc, APPLESMC_GET_KEY_BY_INDEX_CMD, (u8 *)&be, key, 4); ++ ret = get_smc_key_by_index(smc, index, key); + if (ret) + goto out; +- ret = read_smc(smc, APPLESMC_GET_KEY_TYPE_CMD, key, info, 6); ++ memcpy(cache->key, key, 4); ++ ++ ret = get_smc_key_info(smc, key, cache); + if (ret) + goto out; +- +- memcpy(cache->key, key, 4); +- cache->len = info[0]; +- memcpy(cache->type, &info[1], 4); +- cache->flags = info[5]; + cache->valid = true; + + out: +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/3005-applesmc-basic-mmio-interface-implementation.patch b/patch/kernel/archive/uefi-x86-6.19/3005-applesmc-basic-mmio-interface-implementation.patch new file mode 100644 index 000000000000..230656989ab9 --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/3005-applesmc-basic-mmio-interface-implementation.patch @@ -0,0 +1,343 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Aun-Ali Zaidi +Date: Sun, 17 Nov 2019 23:12:16 +0100 +Subject: applesmc: basic mmio interface implementation + +This change introduces a basic MMIO-based +interface implementation required to communicate +with the SMC on T2 Macs. The MMIO interface is +enabled only when it's supported on the running +system. + +The MMIO interface replaces legacy port-based SMC +key reads, writes and metadata requests (getting +key by index and getting key info). 
+ +(Based on patch by @mcmrarm) + +Signed-off-by: Aun-Ali Zaidi +--- + drivers/hwmon/applesmc.c | 237 +++++++++- + 1 file changed, 231 insertions(+), 6 deletions(-) + +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c +index 111111111111..222222222222 100644 +--- a/drivers/hwmon/applesmc.c ++++ b/drivers/hwmon/applesmc.c +@@ -42,6 +42,18 @@ + + #define APPLESMC_NR_PORTS 32 /* 0x300-0x31f */ + ++#define APPLESMC_IOMEM_KEY_DATA 0 ++#define APPLESMC_IOMEM_KEY_STATUS 0x4005 ++#define APPLESMC_IOMEM_KEY_NAME 0x78 ++#define APPLESMC_IOMEM_KEY_DATA_LEN 0x7D ++#define APPLESMC_IOMEM_KEY_SMC_ID 0x7E ++#define APPLESMC_IOMEM_KEY_CMD 0x7F ++#define APPLESMC_IOMEM_MIN_SIZE 0x4006 ++ ++#define APPLESMC_IOMEM_KEY_TYPE_CODE 0 ++#define APPLESMC_IOMEM_KEY_TYPE_DATA_LEN 5 ++#define APPLESMC_IOMEM_KEY_TYPE_FLAGS 6 ++ + #define APPLESMC_MAX_DATA_LENGTH 32 + + /* Apple SMC status bits */ +@@ -138,10 +150,13 @@ struct applesmc_registers { + + struct applesmc_device { + struct acpi_device *dev; ++ struct device *ldev; + struct applesmc_registers reg; + +- bool port_base_set; ++ bool port_base_set, iomem_base_set; + u16 port_base; ++ u8 *__iomem iomem_base; ++ u32 iomem_base_addr, iomem_base_size; + + s16 rest_x; + s16 rest_y; +@@ -347,16 +362,156 @@ static int port_get_smc_key_info(struct applesmc_device *smc, + return 0; + } + ++ ++/* ++ * MMIO based communication. ++ * TODO: Use updated mechanism for cmd timeout/retry ++ */ ++ ++static void iomem_clear_status(struct applesmc_device *smc) ++{ ++ if (ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_STATUS)) ++ iowrite8(0, smc->iomem_base + APPLESMC_IOMEM_KEY_STATUS); ++} ++ ++static int iomem_wait_read(struct applesmc_device *smc) ++{ ++ u8 status; ++ int us; ++ int i; ++ ++ us = APPLESMC_MIN_WAIT; ++ for (i = 0; i < 24 ; i++) { ++ status = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_STATUS); ++ if (status & 0x20) ++ return 0; ++ usleep_range(us, us * 2); ++ if (i > 9) ++ us <<= 1; ++ } ++ ++ dev_warn(smc->ldev, "%s... 
timeout\n", __func__); ++ return -EIO; ++} ++ ++static int iomem_read_smc(struct applesmc_device *smc, u8 cmd, const char *key, ++ u8 *buffer, u8 len) ++{ ++ u8 err, remote_len; ++ u32 key_int = *((u32 *) key); ++ ++ iomem_clear_status(smc); ++ iowrite32(key_int, smc->iomem_base + APPLESMC_IOMEM_KEY_NAME); ++ iowrite32(0, smc->iomem_base + APPLESMC_IOMEM_KEY_SMC_ID); ++ iowrite32(cmd, smc->iomem_base + APPLESMC_IOMEM_KEY_CMD); ++ ++ if (iomem_wait_read(smc)) ++ return -EIO; ++ ++ err = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_CMD); ++ if (err != 0) { ++ dev_warn(smc->ldev, "read_smc_mmio(%x %8x/%.4s) failed: %u\n", ++ cmd, key_int, key, err); ++ return -EIO; ++ } ++ ++ if (cmd == APPLESMC_READ_CMD) { ++ remote_len = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_DATA_LEN); ++ if (remote_len != len) { ++ dev_warn(smc->ldev, ++ "read_smc_mmio(%x %8x/%.4s) failed: buffer length mismatch (remote = %u, requested = %u)\n", ++ cmd, key_int, key, remote_len, len); ++ return -EINVAL; ++ } ++ } else { ++ remote_len = len; ++ } ++ ++ memcpy_fromio(buffer, smc->iomem_base + APPLESMC_IOMEM_KEY_DATA, ++ remote_len); ++ ++ dev_dbg(smc->ldev, "read_smc_mmio(%x %8x/%.4s): buflen=%u reslen=%u\n", ++ cmd, key_int, key, len, remote_len); ++ print_hex_dump_bytes("read_smc_mmio(): ", DUMP_PREFIX_NONE, buffer, remote_len); ++ return 0; ++} ++ ++static int iomem_get_smc_key_type(struct applesmc_device *smc, const char *key, ++ struct applesmc_entry *e) ++{ ++ u8 err; ++ u8 cmd = APPLESMC_GET_KEY_TYPE_CMD; ++ u32 key_int = *((u32 *) key); ++ ++ iomem_clear_status(smc); ++ iowrite32(key_int, smc->iomem_base + APPLESMC_IOMEM_KEY_NAME); ++ iowrite32(0, smc->iomem_base + APPLESMC_IOMEM_KEY_SMC_ID); ++ iowrite32(cmd, smc->iomem_base + APPLESMC_IOMEM_KEY_CMD); ++ ++ if (iomem_wait_read(smc)) ++ return -EIO; ++ ++ err = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_CMD); ++ if (err != 0) { ++ dev_warn(smc->ldev, "get_smc_key_type_mmio(%.4s) failed: %u\n", key, err); ++ return -EIO; ++ } ++ ++ 
e->len = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_TYPE_DATA_LEN); ++ *((uint32_t *) e->type) = ioread32( ++ smc->iomem_base + APPLESMC_IOMEM_KEY_TYPE_CODE); ++ e->flags = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_TYPE_FLAGS); ++ ++ dev_dbg(smc->ldev, "get_smc_key_type_mmio(%.4s): len=%u type=%.4s flags=%x\n", ++ key, e->len, e->type, e->flags); ++ return 0; ++} ++ ++static int iomem_write_smc(struct applesmc_device *smc, u8 cmd, const char *key, ++ const u8 *buffer, u8 len) ++{ ++ u8 err; ++ u32 key_int = *((u32 *) key); ++ ++ iomem_clear_status(smc); ++ iowrite32(key_int, smc->iomem_base + APPLESMC_IOMEM_KEY_NAME); ++ memcpy_toio(smc->iomem_base + APPLESMC_IOMEM_KEY_DATA, buffer, len); ++ iowrite32(len, smc->iomem_base + APPLESMC_IOMEM_KEY_DATA_LEN); ++ iowrite32(0, smc->iomem_base + APPLESMC_IOMEM_KEY_SMC_ID); ++ iowrite32(cmd, smc->iomem_base + APPLESMC_IOMEM_KEY_CMD); ++ ++ if (iomem_wait_read(smc)) ++ return -EIO; ++ ++ err = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_CMD); ++ if (err != 0) { ++ dev_warn(smc->ldev, "write_smc_mmio(%x %.4s) failed: %u\n", cmd, key, err); ++ print_hex_dump_bytes("write_smc_mmio(): ", DUMP_PREFIX_NONE, buffer, len); ++ return -EIO; ++ } ++ ++ dev_dbg(smc->ldev, "write_smc_mmio(%x %.4s): buflen=%u\n", cmd, key, len); ++ print_hex_dump_bytes("write_smc_mmio(): ", DUMP_PREFIX_NONE, buffer, len); ++ return 0; ++} ++ ++ + static int read_smc(struct applesmc_device *smc, const char *key, + u8 *buffer, u8 len) + { +- return port_read_smc(smc, APPLESMC_READ_CMD, key, buffer, len); ++ if (smc->iomem_base_set) ++ return iomem_read_smc(smc, APPLESMC_READ_CMD, key, buffer, len); ++ else ++ return port_read_smc(smc, APPLESMC_READ_CMD, key, buffer, len); + } + + static int write_smc(struct applesmc_device *smc, const char *key, + const u8 *buffer, u8 len) + { +- return port_write_smc(smc, APPLESMC_WRITE_CMD, key, buffer, len); ++ if (smc->iomem_base_set) ++ return iomem_write_smc(smc, APPLESMC_WRITE_CMD, key, buffer, len); ++ else ++ 
return port_write_smc(smc, APPLESMC_WRITE_CMD, key, buffer, len); + } + + static int get_smc_key_by_index(struct applesmc_device *smc, +@@ -365,14 +520,21 @@ static int get_smc_key_by_index(struct applesmc_device *smc, + __be32 be; + + be = cpu_to_be32(index); +- return port_read_smc(smc, APPLESMC_GET_KEY_BY_INDEX_CMD, +- (const char *) &be, (u8 *) key, 4); ++ if (smc->iomem_base_set) ++ return iomem_read_smc(smc, APPLESMC_GET_KEY_BY_INDEX_CMD, ++ (const char *) &be, (u8 *) key, 4); ++ else ++ return port_read_smc(smc, APPLESMC_GET_KEY_BY_INDEX_CMD, ++ (const char *) &be, (u8 *) key, 4); + } + + static int get_smc_key_info(struct applesmc_device *smc, const char *key, + struct applesmc_entry *info) + { +- return port_get_smc_key_info(smc, key, info); ++ if (smc->iomem_base_set) ++ return iomem_get_smc_key_type(smc, key, info); ++ else ++ return port_get_smc_key_info(smc, key, info); + } + + static int read_register_count(struct applesmc_device *smc, +@@ -746,6 +908,7 @@ static int applesmc_add(struct acpi_device *dev) + if (!smc) + return -ENOMEM; + smc->dev = dev; ++ smc->ldev = &dev->dev; + mutex_init(&smc->reg.mutex); + + dev_set_drvdata(&dev->dev, smc); +@@ -807,6 +970,20 @@ static acpi_status applesmc_walk_resources(struct acpi_resource *res, + } + return AE_OK; + ++ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: ++ if (!smc->iomem_base_set) { ++ if (res->data.fixed_memory32.address_length < ++ APPLESMC_IOMEM_MIN_SIZE) { ++ dev_warn(smc->ldev, "found iomem but it's too small: %u\n", ++ res->data.fixed_memory32.address_length); ++ return AE_OK; ++ } ++ smc->iomem_base_addr = res->data.fixed_memory32.address; ++ smc->iomem_base_size = res->data.fixed_memory32.address_length; ++ smc->iomem_base_set = true; ++ } ++ return AE_OK; ++ + case ACPI_RESOURCE_TYPE_END_TAG: + if (smc->port_base_set) + return AE_OK; +@@ -818,6 +995,8 @@ static acpi_status applesmc_walk_resources(struct acpi_resource *res, + } + } + ++static int applesmc_try_enable_iomem(struct applesmc_device 
*smc); ++ + static int applesmc_init_resources(struct applesmc_device *smc) + { + int ret; +@@ -830,11 +1009,57 @@ static int applesmc_init_resources(struct applesmc_device *smc) + if (!request_region(smc->port_base, APPLESMC_NR_PORTS, "applesmc")) + return -ENXIO; + ++ if (smc->iomem_base_set) { ++ if (applesmc_try_enable_iomem(smc)) ++ smc->iomem_base_set = false; ++ } ++ + return 0; + } + ++static int applesmc_try_enable_iomem(struct applesmc_device *smc) ++{ ++ u8 test_val, ldkn_version; ++ ++ dev_dbg(smc->ldev, "Trying to enable iomem based communication\n"); ++ smc->iomem_base = ioremap(smc->iomem_base_addr, smc->iomem_base_size); ++ if (!smc->iomem_base) ++ goto out; ++ ++ /* Apple's driver does this check for some reason */ ++ test_val = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_STATUS); ++ if (test_val == 0xff) { ++ dev_warn(smc->ldev, ++ "iomem enable failed: initial status is 0xff (is %x)\n", ++ test_val); ++ goto out_iomem; ++ } ++ ++ if (read_smc(smc, "LDKN", &ldkn_version, 1)) { ++ dev_warn(smc->ldev, "iomem enable failed: ldkn read failed\n"); ++ goto out_iomem; ++ } ++ ++ if (ldkn_version < 2) { ++ dev_warn(smc->ldev, ++ "iomem enable failed: ldkn version %u is less than minimum (2)\n", ++ ldkn_version); ++ goto out_iomem; ++ } ++ ++ return 0; ++ ++out_iomem: ++ iounmap(smc->iomem_base); ++ ++out: ++ return -ENXIO; ++} ++ + static void applesmc_free_resources(struct applesmc_device *smc) + { ++ if (smc->iomem_base_set) ++ iounmap(smc->iomem_base); + release_region(smc->port_base, APPLESMC_NR_PORTS); + } + +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/3006-applesmc-fan-support-on-T2-Macs.patch b/patch/kernel/archive/uefi-x86-6.19/3006-applesmc-fan-support-on-T2-Macs.patch new file mode 100644 index 000000000000..4d2cbb318e86 --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/3006-applesmc-fan-support-on-T2-Macs.patch @@ -0,0 +1,227 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Paul Pawlowski 
+Date: Sun, 17 Nov 2019 23:12:18 +0100 +Subject: applesmc: fan support on T2 Macs + +T2 Macs changed the fan values from shorts to +floats, and changed the fan manual override +setting from a bitmask to a per-fan boolean +named F0Md (thanks to @kleuter for mentioning +it). + +A minimal soft-float implementation has been +written for convert floats to integers (and vice +versa). + +Signed-off-by: Aun-Ali Zaidi +--- + drivers/hwmon/applesmc.c | 119 ++++++++-- + 1 file changed, 102 insertions(+), 17 deletions(-) + +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c +index 111111111111..222222222222 100644 +--- a/drivers/hwmon/applesmc.c ++++ b/drivers/hwmon/applesmc.c +@@ -87,6 +87,7 @@ + #define FAN_ID_FMT "F%dID" /* r-o char[16] */ + + #define TEMP_SENSOR_TYPE "sp78" ++#define FLOAT_TYPE "flt " + + /* List of keys used to read/write fan speeds */ + static const char *const fan_speed_fmt[] = { +@@ -96,6 +97,7 @@ static const char *const fan_speed_fmt[] = { + "F%dSf", /* safe speed - not all models */ + "F%dTg", /* target speed (manual: rw) */ + }; ++#define FAN_MANUAL_FMT "F%dMd" + + #define INIT_TIMEOUT_MSECS 5000 /* wait up to 5s for device init ... */ + #define INIT_WAIT_MSECS 50 /* ... in 50ms increments */ +@@ -734,6 +736,42 @@ static int applesmc_read_s16(struct applesmc_device *smc, + return 0; + } + ++/** ++ * applesmc_float_to_u32 - Retrieve the integral part of a float. ++ * This is needed because Apple made fans use float values in the T2. ++ * The fractional point is not significantly useful though, and the integral ++ * part can be easily extracted. ++ */ ++static inline u32 applesmc_float_to_u32(u32 d) ++{ ++ u8 sign = (u8) ((d >> 31) & 1); ++ s32 exp = (s32) ((d >> 23) & 0xff) - 0x7f; ++ u32 fr = d & ((1u << 23) - 1); ++ ++ if (sign || exp < 0) ++ return 0; ++ ++ return (u32) ((1u << exp) + (fr >> (23 - exp))); ++} ++ ++/** ++ * applesmc_u32_to_float - Convert an u32 into a float. ++ * See applesmc_float_to_u32 for a rationale. 
++ */ ++static inline u32 applesmc_u32_to_float(u32 d) ++{ ++ u32 dc = d, bc = 0, exp; ++ ++ if (!d) ++ return 0; ++ ++ while (dc >>= 1) ++ ++bc; ++ exp = 0x7f + bc; ++ ++ return (u32) ((exp << 23) | ++ ((d << (23 - (exp - 0x7f))) & ((1u << 23) - 1))); ++} + /* + * applesmc_device_init - initialize the accelerometer. Can sleep. + */ +@@ -1241,6 +1279,7 @@ static ssize_t applesmc_show_fan_speed(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { + struct applesmc_device *smc = dev_get_drvdata(dev); ++ const struct applesmc_entry *entry; + int ret; + unsigned int speed = 0; + char newkey[5]; +@@ -1249,11 +1288,21 @@ static ssize_t applesmc_show_fan_speed(struct device *dev, + scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)], + to_index(attr)); + +- ret = applesmc_read_key(smc, newkey, buffer, 2); ++ entry = applesmc_get_entry_by_key(smc, newkey); ++ if (IS_ERR(entry)) ++ return PTR_ERR(entry); ++ ++ if (!strcmp(entry->type, FLOAT_TYPE)) { ++ ret = applesmc_read_entry(smc, entry, (u8 *) &speed, 4); ++ speed = applesmc_float_to_u32(speed); ++ } else { ++ ret = applesmc_read_entry(smc, entry, buffer, 2); ++ speed = ((buffer[0] << 8 | buffer[1]) >> 2); ++ } ++ + if (ret) + return ret; + +- speed = ((buffer[0] << 8 | buffer[1]) >> 2); + return sysfs_emit(sysfsbuf, "%u\n", speed); + } + +@@ -1262,6 +1311,7 @@ static ssize_t applesmc_store_fan_speed(struct device *dev, + const char *sysfsbuf, size_t count) + { + struct applesmc_device *smc = dev_get_drvdata(dev); ++ const struct applesmc_entry *entry; + int ret; + unsigned long speed; + char newkey[5]; +@@ -1273,9 +1323,18 @@ static ssize_t applesmc_store_fan_speed(struct device *dev, + scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)], + to_index(attr)); + +- buffer[0] = (speed >> 6) & 0xff; +- buffer[1] = (speed << 2) & 0xff; +- ret = applesmc_write_key(smc, newkey, buffer, 2); ++ entry = applesmc_get_entry_by_key(smc, newkey); ++ if (IS_ERR(entry)) ++ return 
PTR_ERR(entry); ++ ++ if (!strcmp(entry->type, FLOAT_TYPE)) { ++ speed = applesmc_u32_to_float(speed); ++ ret = applesmc_write_entry(smc, entry, (u8 *) &speed, 4); ++ } else { ++ buffer[0] = (speed >> 6) & 0xff; ++ buffer[1] = (speed << 2) & 0xff; ++ ret = applesmc_write_key(smc, newkey, buffer, 2); ++ } + + if (ret) + return ret; +@@ -1290,12 +1349,26 @@ static ssize_t applesmc_show_fan_manual(struct device *dev, + int ret; + u16 manual = 0; + u8 buffer[2]; ++ char newkey[5]; ++ bool has_newkey = false; ++ ++ scnprintf(newkey, sizeof(newkey), FAN_MANUAL_FMT, to_index(attr)); ++ ++ ret = applesmc_has_key(smc, newkey, &has_newkey); ++ if (ret) ++ return ret; ++ ++ if (has_newkey) { ++ ret = applesmc_read_key(smc, newkey, buffer, 1); ++ manual = buffer[0]; ++ } else { ++ ret = applesmc_read_key(smc, FANS_MANUAL, buffer, 2); ++ manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; ++ } + +- ret = applesmc_read_key(smc, FANS_MANUAL, buffer, 2); + if (ret) + return ret; + +- manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; + return sysfs_emit(sysfsbuf, "%d\n", manual); + } + +@@ -1306,27 +1379,39 @@ static ssize_t applesmc_store_fan_manual(struct device *dev, + struct applesmc_device *smc = dev_get_drvdata(dev); + int ret; + u8 buffer[2]; ++ char newkey[5]; ++ bool has_newkey = false; + unsigned long input; + u16 val; + + if (kstrtoul(sysfsbuf, 10, &input) < 0) + return -EINVAL; + +- ret = applesmc_read_key(smc, FANS_MANUAL, buffer, 2); ++ scnprintf(newkey, sizeof(newkey), FAN_MANUAL_FMT, to_index(attr)); ++ ++ ret = applesmc_has_key(smc, newkey, &has_newkey); + if (ret) +- goto out; ++ return ret; + +- val = (buffer[0] << 8 | buffer[1]); ++ if (has_newkey) { ++ buffer[0] = input & 1; ++ ret = applesmc_write_key(smc, newkey, buffer, 1); ++ } else { ++ ret = applesmc_read_key(smc, FANS_MANUAL, buffer, 2); ++ val = (buffer[0] << 8 | buffer[1]); ++ if (ret) ++ goto out; + +- if (input) +- val = val | (0x01 << to_index(attr)); +- else +- val = 
val & ~(0x01 << to_index(attr)); ++ if (input) ++ val = val | (0x01 << to_index(attr)); ++ else ++ val = val & ~(0x01 << to_index(attr)); + +- buffer[0] = (val >> 8) & 0xFF; +- buffer[1] = val & 0xFF; ++ buffer[0] = (val >> 8) & 0xFF; ++ buffer[1] = val & 0xFF; + +- ret = applesmc_write_key(smc, FANS_MANUAL, buffer, 2); ++ ret = applesmc_write_key(smc, FANS_MANUAL, buffer, 2); ++ } + + out: + if (ret) +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/3007-applesmc-Add-iMacPro-to-applesmc_whitelist.patch b/patch/kernel/archive/uefi-x86-6.19/3007-applesmc-Add-iMacPro-to-applesmc_whitelist.patch new file mode 100644 index 000000000000..ea124754fc5a --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/3007-applesmc-Add-iMacPro-to-applesmc_whitelist.patch @@ -0,0 +1,31 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Orlando Chamberlain +Date: Sun, 9 Oct 2022 15:59:01 +0530 +Subject: applesmc: Add iMacPro to applesmc_whitelist + +The iMacPro1,1 is the only iMacPro released before the line was +discontinued. Add it to the applesmc_whitelist. 
+ +Signed-off-by: Orlando Chamberlain +--- + drivers/hwmon/applesmc.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c +index 111111111111..222222222222 100644 +--- a/drivers/hwmon/applesmc.c ++++ b/drivers/hwmon/applesmc.c +@@ -1803,6 +1803,10 @@ static const struct dmi_system_id applesmc_whitelist[] __initconst = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "Macmini") }, + }, ++ { applesmc_dmi_match, "Apple iMacPro", { ++ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "iMacPro") }, ++ }, + { applesmc_dmi_match, "Apple MacPro", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/3008-applesmc-make-applesmc_remove-void.patch b/patch/kernel/archive/uefi-x86-6.19/3008-applesmc-make-applesmc_remove-void.patch new file mode 100644 index 000000000000..6ad7f1422328 --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/3008-applesmc-make-applesmc_remove-void.patch @@ -0,0 +1,35 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Orlando Chamberlain +Date: Tue, 24 Jan 2023 15:46:48 +1100 +Subject: applesmc: make applesmc_remove void + +for linux6.2 compatibility +--- + drivers/hwmon/applesmc.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c +index 111111111111..222222222222 100644 +--- a/drivers/hwmon/applesmc.c ++++ b/drivers/hwmon/applesmc.c +@@ -979,7 +979,7 @@ static int applesmc_add(struct acpi_device *dev) + return ret; + } + +-static int applesmc_remove(struct acpi_device *dev) ++static void applesmc_remove(struct acpi_device *dev) + { + struct applesmc_device *smc = dev_get_drvdata(&dev->dev); + +@@ -990,7 +990,7 @@ static int applesmc_remove(struct acpi_device *dev) + mutex_destroy(&smc->reg.mutex); + kfree(smc); + +- return 0; ++ return; + } + + static 
acpi_status applesmc_walk_resources(struct acpi_resource *res, +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/3009-applesmc-battery-charge-limiter.patch b/patch/kernel/archive/uefi-x86-6.19/3009-applesmc-battery-charge-limiter.patch new file mode 100644 index 000000000000..d485b1ca2426 --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/3009-applesmc-battery-charge-limiter.patch @@ -0,0 +1,96 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Orlando Chamberlain +Date: Mon, 30 Jan 2023 18:42:21 +1100 +Subject: applesmc: battery charge limiter + +--- + drivers/hwmon/applesmc.c | 42 +++++++++- + 1 file changed, 41 insertions(+), 1 deletion(-) + +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c +index 111111111111..222222222222 100644 +--- a/drivers/hwmon/applesmc.c ++++ b/drivers/hwmon/applesmc.c +@@ -1477,6 +1477,35 @@ static void applesmc_brightness_set(struct led_classdev *led_cdev, + dev_dbg(led_cdev->dev, "work was already on the queue.\n"); + } + ++static ssize_t applesmc_BCLM_store(struct device *dev, ++ struct device_attribute *attr, char *sysfsbuf, size_t count) ++{ ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ u8 val; ++ ++ if (kstrtou8(sysfsbuf, 10, &val) < 0) ++ return -EINVAL; ++ ++ if (val < 0 || val > 100) ++ return -EINVAL; ++ ++ if (applesmc_write_key(smc, "BCLM", &val, 1)) ++ return -ENODEV; ++ return count; ++} ++ ++static ssize_t applesmc_BCLM_show(struct device *dev, ++ struct device_attribute *attr, char *sysfsbuf) ++{ ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ u8 val; ++ ++ if (applesmc_read_key(smc, "BCLM", &val, 1)) ++ return -ENODEV; ++ ++ return sysfs_emit(sysfsbuf, "%d\n", val); ++} ++ + static ssize_t applesmc_key_count_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { +@@ -1611,6 +1640,11 @@ static struct applesmc_node_group temp_group[] = { + { } + }; + ++static struct applesmc_node_group BCLM_group[] = { ++ { 
"battery_charge_limit", applesmc_BCLM_show, applesmc_BCLM_store }, ++ { } ++}; ++ + /* Module stuff */ + + /* +@@ -1829,10 +1863,13 @@ static int applesmc_create_modules(struct applesmc_device *smc) + ret = applesmc_create_nodes(smc, info_group, 1); + if (ret) + goto out; ++ ret = applesmc_create_nodes(smc, BCLM_group, 1); ++ if (ret) ++ goto out_info; + + ret = applesmc_create_nodes(smc, fan_group, smc->reg.fan_count); + if (ret) +- goto out_info; ++ goto out_bclm; + + ret = applesmc_create_nodes(smc, temp_group, smc->reg.index_count); + if (ret) +@@ -1868,6 +1905,8 @@ static int applesmc_create_modules(struct applesmc_device *smc) + applesmc_destroy_nodes(smc, temp_group); + out_fans: + applesmc_destroy_nodes(smc, fan_group); ++out_bclm: ++ applesmc_destroy_nodes(smc, BCLM_group); + out_info: + applesmc_destroy_nodes(smc, info_group); + out: +@@ -1882,6 +1921,7 @@ static void applesmc_destroy_modules(struct applesmc_device *smc) + applesmc_release_accelerometer(smc); + applesmc_destroy_nodes(smc, temp_group); + applesmc_destroy_nodes(smc, fan_group); ++ applesmc_destroy_nodes(smc, BCLM_group); + applesmc_destroy_nodes(smc, info_group); + } + +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/4001-asahi-trackpad.patch b/patch/kernel/archive/uefi-x86-6.19/4001-asahi-trackpad.patch new file mode 100644 index 000000000000..94745ae5e184 --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/4001-asahi-trackpad.patch @@ -0,0 +1,5052 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Janne Grunau +Date: Sun, 12 Dec 2021 20:40:04 +0100 +Subject: HID: add device IDs for Apple SPI HID devices + +Apple Silicon based laptop use SPI as transport for HID. Add support for +SPI-based HID devices and and Apple keyboard and trackpad devices. +Intel based laptops using the keyboard input driver applespi use the +same HID over SPI protocol and can be supported later. 
+ +This requires SPI keyboard/mouse HID types since Apple's intenal +keyboards/trackpads use the same product id. + +Signed-off-by: Janne Grunau +--- + drivers/hid/hid-core.c | 3 +++ + drivers/hid/hid-ids.h | 5 +++++ + include/linux/hid.h | 6 +++++- + 3 files changed, 13 insertions(+), 1 deletion(-) + +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -2316,6 +2316,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) + case BUS_I2C: + bus = "I2C"; + break; ++ case BUS_SPI: ++ bus = "SPI"; ++ break; + case BUS_SDW: + bus = "SOUNDWIRE"; + break; +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -93,6 +93,7 @@ + + #define USB_VENDOR_ID_APPLE 0x05ac + #define BT_VENDOR_ID_APPLE 0x004c ++#define SPI_VENDOR_ID_APPLE 0x05ac + #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 + #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d + #define USB_DEVICE_ID_APPLE_MAGICMOUSE2 0x0269 +@@ -197,6 +198,10 @@ + #define USB_DEVICE_ID_APPLE_IRCONTROL5 0x8243 + #define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102 + #define USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY 0x8302 ++#define SPI_DEVICE_ID_APPLE_MACBOOK_AIR_2020 0x0281 ++#define SPI_DEVICE_ID_APPLE_MACBOOK_PRO13_2020 0x0341 ++#define SPI_DEVICE_ID_APPLE_MACBOOK_PRO14_2021 0x0342 ++#define SPI_DEVICE_ID_APPLE_MACBOOK_PRO16_2021 0x0343 + + #define USB_VENDOR_ID_ASETEK 0x2433 + #define USB_DEVICE_ID_ASETEK_INVICTA 0xf300 +diff --git a/include/linux/hid.h b/include/linux/hid.h +index 111111111111..222222222222 100644 +--- a/include/linux/hid.h ++++ b/include/linux/hid.h +@@ -625,7 +625,9 @@ struct hid_input { + enum hid_type { + HID_TYPE_OTHER = 0, + HID_TYPE_USBMOUSE, +- HID_TYPE_USBNONE ++ HID_TYPE_USBNONE, ++ HID_TYPE_SPI_KEYBOARD, ++ HID_TYPE_SPI_MOUSE, + }; + + enum hid_battery_status { +@@ -786,6 
+788,8 @@ struct hid_descriptor { + .bus = BUS_BLUETOOTH, .vendor = (ven), .product = (prod) + #define HID_I2C_DEVICE(ven, prod) \ + .bus = BUS_I2C, .vendor = (ven), .product = (prod) ++#define HID_SPI_DEVICE(ven, prod) \ ++ .bus = BUS_SPI, .vendor = (ven), .product = (prod) + + #define HID_REPORT_ID(rep) \ + .report_type = (rep) +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Hector Martin +Date: Fri, 8 Jul 2022 00:29:43 +0900 +Subject: HID: add HOST vendor/device IDs for Apple MTP devices + +Apple M2* chips have an embedded MTP processor that handles all HID +functions, and does not go over a traditional bus like SPI. The devices +still have real IDs, so add them here. + +Signed-off-by: Hector Martin +--- + drivers/hid/hid-ids.h | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -94,6 +94,7 @@ + #define USB_VENDOR_ID_APPLE 0x05ac + #define BT_VENDOR_ID_APPLE 0x004c + #define SPI_VENDOR_ID_APPLE 0x05ac ++#define HOST_VENDOR_ID_APPLE 0x05ac + #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 + #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d + #define USB_DEVICE_ID_APPLE_MAGICMOUSE2 0x0269 +@@ -202,6 +203,10 @@ + #define SPI_DEVICE_ID_APPLE_MACBOOK_PRO13_2020 0x0341 + #define SPI_DEVICE_ID_APPLE_MACBOOK_PRO14_2021 0x0342 + #define SPI_DEVICE_ID_APPLE_MACBOOK_PRO16_2021 0x0343 ++#define HOST_DEVICE_ID_APPLE_MACBOOK_AIR13_2022 0x0351 ++#define HOST_DEVICE_ID_APPLE_MACBOOK_PRO14_2023 0x0352 ++#define HOST_DEVICE_ID_APPLE_MACBOOK_PRO16_2023 0x0353 ++#define HOST_DEVICE_ID_APPLE_MACBOOK_PRO13_2022 0x0354 + + #define USB_VENDOR_ID_ASETEK 0x2433 + #define USB_DEVICE_ID_ASETEK_INVICTA 0xf300 +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Hector Martin +Date: Fri, 8 Jul 2022 02:06:15 +0900 +Subject: HID: core: Handle HOST bus type 
when announcing devices + +Signed-off-by: Hector Martin +--- + drivers/hid/hid-core.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -2319,6 +2319,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) + case BUS_SPI: + bus = "SPI"; + break; ++ case BUS_HOST: ++ bus = "HOST"; ++ break; + case BUS_SDW: + bus = "SOUNDWIRE"; + break; +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Hector Martin +Date: Mon, 10 Apr 2023 22:44:44 +0900 +Subject: HID: Bump maximum report size to 16384 + +This maximum is arbitrary. Recent Apple devices have some vendor-defined +reports with 16384 here which fail to parse without this, so let's bump +it to that. + +This value is used as follows: + +report->size += parser->global.report_size * parser->global.report_count; + +[...] + +/* Total size check: Allow for possible report index byte */ +if (report->size > (max_buffer_size - 1) << 3) { + hid_err(parser->device, "report is too long\n"); + return -1; +} + +All of these fields are unsigned integers, and report_count is bounded +by HID_MAX_USAGES (12288). Therefore, as long as the respective maximums +do not overflow an unsigned integer (let's say a signed integer just in +case), we're safe. This holds for 16384. + +Signed-off-by: Hector Martin +--- + drivers/hid/hid-core.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -468,7 +468,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) + + case HID_GLOBAL_ITEM_TAG_REPORT_SIZE: + parser->global.report_size = item_udata(item); +- if (parser->global.report_size > 256) { ++ /* Arbitrary maximum. 
Some Apple devices have 16384 here. ++ * This * HID_MAX_USAGES must fit in a signed integer. ++ */ ++ if (parser->global.report_size > 16384) { + hid_err(parser->device, "invalid report_size %d\n", + parser->global.report_size); + return -1; +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Janne Grunau +Date: Thu, 16 Dec 2021 21:15:31 +0100 +Subject: HID: apple: Bind Apple silicon SPI devices + +Apple MacBook keyboards started using HID over SPI in 2015. With the +addition of the SPI HID transport they can be supported by this driver. +Support all product ids over with the Apple SPI vendor id for now. + +The Macbook Pro (M1, 13-inch, 2020) uses the same function key mapping +as other Macbook Pros with touchbar and dedicated ESC key. +Apple silicon Macbooks use the same function key mapping as the 2021 and +later Magic Keyboards. + +Signed-off-by: Janne Grunau +--- + drivers/hid/Kconfig | 2 +- + drivers/hid/hid-apple.c | 15 ++++++++++ + 2 files changed, 16 insertions(+), 1 deletion(-) + +diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig +index 111111111111..222222222222 100644 +--- a/drivers/hid/Kconfig ++++ b/drivers/hid/Kconfig +@@ -140,7 +140,7 @@ config HID_APPLE + tristate "Apple {i,Power,Mac}Books" + depends on LEDS_CLASS + depends on NEW_LEDS +- default !EXPERT ++ default !EXPERT || SPI_HID_APPLE + help + Support for some Apple devices which less or more break + HID specification. 
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -518,6 +518,15 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, + table = macbookair_fn_keys; + else if (hid->product < 0x21d || hid->product >= 0x300) + table = powerbook_fn_keys; ++ else if (hid->bus == BUS_SPI) ++ switch (hid->product) { ++ case SPI_DEVICE_ID_APPLE_MACBOOK_PRO13_2020: ++ table = macbookpro_dedicated_esc_fn_keys; ++ break; ++ default: ++ table = magic_keyboard_2021_and_2024_fn_keys; ++ break; ++ } + else + table = apple_fn_keys; + } +@@ -938,6 +947,10 @@ static int apple_probe(struct hid_device *hdev, + struct apple_sc *asc; + int ret; + ++ if (id->bus == BUS_SPI && id->vendor == SPI_VENDOR_ID_APPLE && ++ hdev->type != HID_TYPE_SPI_KEYBOARD) ++ return -ENODEV; ++ + asc = devm_kzalloc(&hdev->dev, sizeof(*asc), GFP_KERNEL); + if (asc == NULL) { + hid_err(hdev, "can't alloc apple descriptor\n"); +@@ -1216,6 +1229,8 @@ static const struct hid_device_id apple_devices[] = { + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY }, + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2024), + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, ++ { HID_SPI_DEVICE(SPI_VENDOR_ID_APPLE, HID_ANY_ID), ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT), + .driver_data = APPLE_MAGIC_BACKLIGHT }, + +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Hector Martin +Date: Fri, 8 Jul 2022 02:12:24 +0900 +Subject: HID: apple: Bind to HOST devices for MTP + +We use BUS_HOST for MTP HID subdevices + +Signed-off-by: Hector Martin +--- + drivers/hid/hid-apple.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/drivers/hid/hid-apple.c 
b/drivers/hid/hid-apple.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -518,9 +518,10 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, + table = macbookair_fn_keys; + else if (hid->product < 0x21d || hid->product >= 0x300) + table = powerbook_fn_keys; +- else if (hid->bus == BUS_SPI) ++ else if (hid->bus == BUS_HOST || hid->bus == BUS_SPI) + switch (hid->product) { + case SPI_DEVICE_ID_APPLE_MACBOOK_PRO13_2020: ++ case HOST_DEVICE_ID_APPLE_MACBOOK_PRO13_2022: + table = macbookpro_dedicated_esc_fn_keys; + break; + default: +@@ -947,7 +948,7 @@ static int apple_probe(struct hid_device *hdev, + struct apple_sc *asc; + int ret; + +- if (id->bus == BUS_SPI && id->vendor == SPI_VENDOR_ID_APPLE && ++ if ((id->bus == BUS_SPI || id->bus == BUS_HOST) && id->vendor == SPI_VENDOR_ID_APPLE && + hdev->type != HID_TYPE_SPI_KEYBOARD) + return -ENODEV; + +@@ -1231,6 +1232,8 @@ static const struct hid_device_id apple_devices[] = { + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { HID_SPI_DEVICE(SPI_VENDOR_ID_APPLE, HID_ANY_ID), + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, ++ { HID_DEVICE(BUS_HOST, HID_GROUP_ANY, HOST_VENDOR_ID_APPLE, HID_ANY_ID), ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT), + .driver_data = APPLE_MAGIC_BACKLIGHT }, + +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Janne Grunau +Date: Thu, 16 Dec 2021 00:10:51 +0100 +Subject: HID: magicmouse: use a define of the max number of touch contacts + +Signed-off-by: Janne Grunau +--- + drivers/hid/hid-magicmouse.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ 
-62,6 +62,8 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie + #define DOUBLE_REPORT_ID 0xf7 + #define USB_BATTERY_TIMEOUT_SEC 60 + ++#define MAX_CONTACTS 16 ++ + /* These definitions are not precise, but they're close enough. (Bits + * 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem + * to be some kind of bit mask -- 0x20 may be a near-field reading, +@@ -143,8 +145,8 @@ struct magicmouse_sc { + u8 size; + bool scroll_x_active; + bool scroll_y_active; +- } touches[16]; +- int tracking_ids[16]; ++ } touches[MAX_CONTACTS]; ++ int tracking_ids[MAX_CONTACTS]; + + struct hid_device *hdev; + struct delayed_work work; +@@ -615,7 +617,7 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd + + __set_bit(EV_ABS, input->evbit); + +- error = input_mt_init_slots(input, 16, mt_flags); ++ error = input_mt_init_slots(input, MAX_CONTACTS, mt_flags); + if (error) + return error; + input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2, +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Janne Grunau +Date: Thu, 16 Dec 2021 00:12:35 +0100 +Subject: HID: magicmouse: use struct input_mt_pos for X/Y + +Signed-off-by: Janne Grunau +--- + drivers/hid/hid-magicmouse.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -121,6 +121,7 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie + * @ntouches: Number of touches in most recent touch report. + * @scroll_accel: Number of consecutive scroll motions. + * @scroll_jiffies: Time of last scroll motion. ++ * @pos: multi touch position data of the last report. + * @touches: Most recent data for a touch, indexed by tracking ID. 
+ * @tracking_ids: Mapping of current touch input data to @touches. + * @hdev: Pointer to the underlying HID device. +@@ -135,9 +136,8 @@ struct magicmouse_sc { + int scroll_accel; + unsigned long scroll_jiffies; + ++ struct input_mt_pos pos[MAX_CONTACTS]; + struct { +- short x; +- short y; + short scroll_x; + short scroll_y; + short scroll_x_hr; +@@ -194,7 +194,7 @@ static void magicmouse_emit_buttons(struct magicmouse_sc *msc, int state) + } else if (last_state != 0) { + state = last_state; + } else if ((id = magicmouse_firm_touch(msc)) >= 0) { +- int x = msc->touches[id].x; ++ int x = msc->pos[id].x; + if (x < middle_button_start) + state = 1; + else if (x > middle_button_stop) +@@ -258,8 +258,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda + + /* Store tracking ID and other fields. */ + msc->tracking_ids[raw_id] = id; +- msc->touches[id].x = x; +- msc->touches[id].y = y; ++ msc->pos[id].x = x; ++ msc->pos[id].y = y; + msc->touches[id].size = size; + + /* If requested, emulate a scroll wheel by detecting small +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Janne Grunau +Date: Thu, 16 Dec 2021 00:15:30 +0100 +Subject: HID: magicmouse: use ops function pointers for input functionality + +Will be used for supporting MacBook trackpads connected via SPI. 
+ +Signed-off-by: Janne Grunau +--- + drivers/hid/hid-magicmouse.c | 32 +++++++++- + 1 file changed, 31 insertions(+), 1 deletion(-) + +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -114,6 +114,13 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie + #define TRACKPAD2_RES_Y \ + ((TRACKPAD2_MAX_Y - TRACKPAD2_MIN_Y) / (TRACKPAD2_DIMENSION_Y / 100)) + ++ ++struct magicmouse_input_ops { ++ int (*raw_event)(struct hid_device *hdev, ++ struct hid_report *report, u8 *data, int size); ++ int (*setup_input)(struct input_dev *input, struct hid_device *hdev); ++}; ++ + /** + * struct magicmouse_sc - Tracks Magic Mouse-specific data. + * @input: Input device through which we report events. +@@ -127,6 +134,7 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie + * @hdev: Pointer to the underlying HID device. + * @work: Workqueue to handle initialization retry for quirky devices. + * @battery_timer: Timer for obtaining battery level information. ++ * @input_ops: Input ops based on device type. 
+ */ + struct magicmouse_sc { + struct input_dev *input; +@@ -151,6 +159,7 @@ struct magicmouse_sc { + struct hid_device *hdev; + struct delayed_work work; + struct timer_list battery_timer; ++ struct magicmouse_input_ops input_ops; + }; + + static int magicmouse_firm_touch(struct magicmouse_sc *msc) +@@ -389,6 +398,14 @@ static int magicmouse_raw_event(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) + { + struct magicmouse_sc *msc = hid_get_drvdata(hdev); ++ ++ return msc->input_ops.raw_event(hdev, report, data, size); ++} ++ ++static int magicmouse_raw_event_usb(struct hid_device *hdev, ++ struct hid_report *report, u8 *data, int size) ++{ ++ struct magicmouse_sc *msc = hid_get_drvdata(hdev); + struct input_dev *input = msc->input; + int x = 0, y = 0, ii, clicks = 0, npoints; + +@@ -538,7 +555,17 @@ static int magicmouse_event(struct hid_device *hdev, struct hid_field *field, + return 0; + } + +-static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev) ++ ++static int magicmouse_setup_input(struct input_dev *input, ++ struct hid_device *hdev) ++{ ++ struct magicmouse_sc *msc = hid_get_drvdata(hdev); ++ ++ return msc->input_ops.setup_input(input, hdev); ++} ++ ++static int magicmouse_setup_input_usb(struct input_dev *input, ++ struct hid_device *hdev) + { + int error; + int mt_flags = 0; +@@ -860,6 +887,9 @@ static int magicmouse_probe(struct hid_device *hdev, + return -ENOMEM; + } + ++ msc->input_ops.raw_event = magicmouse_raw_event_usb; ++ msc->input_ops.setup_input = magicmouse_setup_input_usb; ++ + msc->scroll_accel = SCROLL_ACCEL_DEFAULT; + msc->hdev = hdev; + INIT_DEFERRABLE_WORK(&msc->work, magicmouse_enable_mt_work); +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Janne Grunau +Date: Thu, 16 Dec 2021 01:17:48 +0100 +Subject: HID: magicmouse: add support for Macbook trackpads + +The trackpads in Macbooks beginning in 2015 are HID devices connected +over SPI. 
On Intel Macbooks they are currently supported by applespi.c.
+This change adds support for the trackpads on Apple Silicon Macbooks
+starting in late 2020. They use a new HID over SPI transport driver.
+The touch report format differs from USB/BT Magic Trackpads. It is the
+same format as the type 4 format supported by bcm5974.c.
+
+Signed-off-by: Janne Grunau
+---
+ drivers/hid/Kconfig          |   4 +-
+ drivers/hid/hid-magicmouse.c | 266 +++++++++-
+ 2 files changed, 266 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 111111111111..222222222222 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -727,11 +727,13 @@ config LOGIWHEELS_FF
+ 
+ config HID_MAGICMOUSE
+ 	tristate "Apple Magic Mouse/Trackpad multi-touch support"
++	default SPI_HID_APPLE
+ 	help
+ 	  Support for the Apple Magic Mouse/Trackpad multi-touch.
+ 
+ 	  Say Y here if you want support for the multi-touch features of the
+-	  Apple Wireless "Magic" Mouse and the Apple Wireless "Magic" Trackpad.
++	  Apple Wireless "Magic" Mouse, the Apple Wireless "Magic" Trackpad and
++	  force touch Trackpads in Macbooks starting from 2015.
+ + config HID_MALTRON + tristate "Maltron L90 keyboard" +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -60,6 +60,7 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie + #define MOUSE_REPORT_ID 0x29 + #define MOUSE2_REPORT_ID 0x12 + #define DOUBLE_REPORT_ID 0xf7 ++#define SPI_REPORT_ID 0x02 + #define USB_BATTERY_TIMEOUT_SEC 60 + + #define MAX_CONTACTS 16 +@@ -114,6 +115,18 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie + #define TRACKPAD2_RES_Y \ + ((TRACKPAD2_MAX_Y - TRACKPAD2_MIN_Y) / (TRACKPAD2_DIMENSION_Y / 100)) + ++#define J314_TP_DIMENSION_X (float)13000 ++#define J314_TP_MIN_X -5900 ++#define J314_TP_MAX_X 6500 ++#define J314_TP_RES_X \ ++ ((J314_TP_MAX_X - J314_TP_MIN_X) / (J314_TP_DIMENSION_X / 100)) ++#define J314_TP_DIMENSION_Y (float)8100 ++#define J314_TP_MIN_Y -200 ++#define J314_TP_MAX_Y 7400 ++#define J314_TP_RES_Y \ ++ ((J314_TP_MAX_Y - J314_TP_MIN_Y) / (J314_TP_DIMENSION_Y / 100)) ++ ++#define J314_TP_MAX_FINGER_ORIENTATION 16384 + + struct magicmouse_input_ops { + int (*raw_event)(struct hid_device *hdev, +@@ -537,6 +550,154 @@ static int magicmouse_raw_event_usb(struct hid_device *hdev, + return 1; + } + ++/** ++ * struct tp_finger - single trackpad finger structure, le16-aligned ++ * ++ * @unknown1: unknown ++ * @unknown2: unknown ++ * @abs_x: absolute x coordinate ++ * @abs_y: absolute y coordinate ++ * @rel_x: relative x coordinate ++ * @rel_y: relative y coordinate ++ * @tool_major: tool area, major axis ++ * @tool_minor: tool area, minor axis ++ * @orientation: 16384 when point, else 15 bit angle ++ * @touch_major: touch area, major axis ++ * @touch_minor: touch area, minor axis ++ * @unused: zeros ++ * @pressure: pressure on forcetouch touchpad ++ * @multi: one finger: varies, more fingers: constant ++ */ ++struct tp_finger { ++ 
__le16 unknown1; ++ __le16 unknown2; ++ __le16 abs_x; ++ __le16 abs_y; ++ __le16 rel_x; ++ __le16 rel_y; ++ __le16 tool_major; ++ __le16 tool_minor; ++ __le16 orientation; ++ __le16 touch_major; ++ __le16 touch_minor; ++ __le16 unused[2]; ++ __le16 pressure; ++ __le16 multi; ++} __attribute__((packed, aligned(2))); ++ ++/** ++ * struct trackpad report ++ * ++ * @report_id: reportid ++ * @buttons: HID Usage Buttons 3 1-bit reports ++ * @num_fingers: the number of fingers being reported in @fingers ++ * @clicked: same as @buttons ++ */ ++struct tp_header { ++ // HID mouse report ++ u8 report_id; ++ u8 buttons; ++ u8 rel_x; ++ u8 rel_y; ++ u8 padding[4]; ++ // HID vendor part, up to 1751 bytes ++ u8 unknown[22]; ++ u8 num_fingers; ++ u8 clicked; ++ u8 unknown3[14]; ++}; ++ ++static inline int le16_to_int(__le16 x) ++{ ++ return (signed short)le16_to_cpu(x); ++} ++ ++static void report_finger_data(struct input_dev *input, int slot, ++ const struct input_mt_pos *pos, ++ const struct tp_finger *f) ++{ ++ input_mt_slot(input, slot); ++ input_mt_report_slot_state(input, MT_TOOL_FINGER, true); ++ ++ input_report_abs(input, ABS_MT_TOUCH_MAJOR, ++ le16_to_int(f->touch_major) << 1); ++ input_report_abs(input, ABS_MT_TOUCH_MINOR, ++ le16_to_int(f->touch_minor) << 1); ++ input_report_abs(input, ABS_MT_WIDTH_MAJOR, ++ le16_to_int(f->tool_major) << 1); ++ input_report_abs(input, ABS_MT_WIDTH_MINOR, ++ le16_to_int(f->tool_minor) << 1); ++ input_report_abs(input, ABS_MT_ORIENTATION, ++ J314_TP_MAX_FINGER_ORIENTATION - le16_to_int(f->orientation)); ++ input_report_abs(input, ABS_MT_PRESSURE, le16_to_int(f->pressure)); ++ input_report_abs(input, ABS_MT_POSITION_X, pos->x); ++ input_report_abs(input, ABS_MT_POSITION_Y, pos->y); ++} ++ ++static int magicmouse_raw_event_spi(struct hid_device *hdev, ++ struct hid_report *report, u8 *data, int size) ++{ ++ struct magicmouse_sc *msc = hid_get_drvdata(hdev); ++ struct input_dev *input = msc->input; ++ struct tp_header *tp_hdr; ++ struct 
tp_finger *f; ++ int i, n; ++ u32 npoints; ++ const size_t hdr_sz = sizeof(struct tp_header); ++ const size_t touch_sz = sizeof(struct tp_finger); ++ u8 map_contacs[MAX_CONTACTS]; ++ ++ // hid_warn(hdev, "%s\n", __func__); ++ // print_hex_dump_debug("appleft ev: ", DUMP_PREFIX_OFFSET, 16, 1, data, ++ // size, false); ++ ++ if (data[0] != SPI_REPORT_ID) ++ return 0; ++ ++ /* Expect 46 bytes of prefix, and N * 30 bytes of touch data. */ ++ if (size < hdr_sz || ((size - hdr_sz) % touch_sz) != 0) ++ return 0; ++ ++ tp_hdr = (struct tp_header *)data; ++ ++ npoints = (size - hdr_sz) / touch_sz; ++ if (npoints < tp_hdr->num_fingers || npoints > MAX_CONTACTS) { ++ hid_warn(hdev, ++ "unexpected number of touches (%u) for " ++ "report\n", ++ npoints); ++ return 0; ++ } ++ ++ n = 0; ++ for (i = 0; i < tp_hdr->num_fingers; i++) { ++ f = (struct tp_finger *)(data + hdr_sz + i * touch_sz); ++ if (le16_to_int(f->touch_major) == 0) ++ continue; ++ ++ hid_dbg(hdev, "ev x:%04x y:%04x\n", le16_to_int(f->abs_x), ++ le16_to_int(f->abs_y)); ++ msc->pos[n].x = le16_to_int(f->abs_x); ++ msc->pos[n].y = -le16_to_int(f->abs_y); ++ map_contacs[n] = i; ++ n++; ++ } ++ ++ input_mt_assign_slots(input, msc->tracking_ids, msc->pos, n, 0); ++ ++ for (i = 0; i < n; i++) { ++ int idx = map_contacs[i]; ++ f = (struct tp_finger *)(data + hdr_sz + idx * touch_sz); ++ report_finger_data(input, msc->tracking_ids[i], &msc->pos[i], f); ++ } ++ ++ input_mt_sync_frame(input); ++ input_report_key(input, BTN_MOUSE, data[1] & 1); ++ ++ input_sync(input); ++ return 1; ++} ++ + static int magicmouse_event(struct hid_device *hdev, struct hid_field *field, + struct hid_usage *usage, __s32 value) + { +@@ -727,6 +888,79 @@ static int magicmouse_setup_input_usb(struct input_dev *input, + return 0; + } + ++static int magicmouse_setup_input_spi(struct input_dev *input, ++ struct hid_device *hdev) ++{ ++ int error; ++ int mt_flags = 0; ++ ++ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); ++ __clear_bit(BTN_0, 
input->keybit); ++ __clear_bit(BTN_RIGHT, input->keybit); ++ __clear_bit(BTN_MIDDLE, input->keybit); ++ __clear_bit(EV_REL, input->evbit); ++ __clear_bit(REL_X, input->relbit); ++ __clear_bit(REL_Y, input->relbit); ++ ++ mt_flags = INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK; ++ ++ /* finger touch area */ ++ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 5000, 0, 0); ++ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 5000, 0, 0); ++ ++ /* finger approach area */ ++ input_set_abs_params(input, ABS_MT_WIDTH_MAJOR, 0, 5000, 0, 0); ++ input_set_abs_params(input, ABS_MT_WIDTH_MINOR, 0, 5000, 0, 0); ++ ++ /* Note: Touch Y position from the device is inverted relative ++ * to how pointer motion is reported (and relative to how USB ++ * HID recommends the coordinates work). This driver keeps ++ * the origin at the same position, and just uses the additive ++ * inverse of the reported Y. ++ */ ++ ++ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 6000, 0, 0); ++ ++ /* ++ * This makes libinput recognize this as a PressurePad and ++ * stop trying to use pressure for touch size. Pressure unit ++ * seems to be ~grams on these touchpads. ++ */ ++ input_abs_set_res(input, ABS_MT_PRESSURE, 1); ++ ++ /* finger orientation */ ++ input_set_abs_params(input, ABS_MT_ORIENTATION, -J314_TP_MAX_FINGER_ORIENTATION, ++ J314_TP_MAX_FINGER_ORIENTATION, 0, 0); ++ ++ /* finger position */ ++ input_set_abs_params(input, ABS_MT_POSITION_X, J314_TP_MIN_X, J314_TP_MAX_X, ++ 0, 0); ++ /* Y axis is inverted */ ++ input_set_abs_params(input, ABS_MT_POSITION_Y, -J314_TP_MAX_Y, -J314_TP_MIN_Y, ++ 0, 0); ++ ++ /* X/Y resolution */ ++ input_abs_set_res(input, ABS_MT_POSITION_X, J314_TP_RES_X); ++ input_abs_set_res(input, ABS_MT_POSITION_Y, J314_TP_RES_Y); ++ ++ input_set_events_per_packet(input, 60); ++ ++ /* touchpad button */ ++ input_set_capability(input, EV_KEY, BTN_MOUSE); ++ ++ /* ++ * hid-input may mark device as using autorepeat, but the trackpad does ++ * not actually want it. 
++ */ ++ __clear_bit(EV_REP, input->evbit); ++ ++ error = input_mt_init_slots(input, MAX_CONTACTS, mt_flags); ++ if (error) ++ return error; ++ ++ return 0; ++} ++ + static int magicmouse_input_mapping(struct hid_device *hdev, + struct hid_input *hi, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max) +@@ -777,6 +1011,10 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev) + int feature_size; + + switch (hdev->product) { ++ case SPI_DEVICE_ID_APPLE_MACBOOK_AIR_2020: ++ case SPI_DEVICE_ID_APPLE_MACBOOK_PRO13_2020: ++ case SPI_DEVICE_ID_APPLE_MACBOOK_PRO14_2021: ++ case SPI_DEVICE_ID_APPLE_MACBOOK_PRO16_2021: + case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2: + case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC: + switch (hdev->vendor) { +@@ -784,7 +1022,7 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev) + feature_size = sizeof(feature_mt_trackpad2_bt); + feature = feature_mt_trackpad2_bt; + break; +- default: /* USB_VENDOR_ID_APPLE */ ++ default: /* USB_VENDOR_ID_APPLE || SPI_VENDOR_ID_APPLE */ + feature_size = sizeof(feature_mt_trackpad2_usb); + feature = feature_mt_trackpad2_usb; + } +@@ -881,14 +1119,25 @@ static int magicmouse_probe(struct hid_device *hdev, + struct hid_report *report; + int ret; + ++ if (id->bus == BUS_SPI && id->vendor == SPI_VENDOR_ID_APPLE && ++ hdev->type != HID_TYPE_SPI_MOUSE) ++ return -ENODEV; ++ + msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL); + if (msc == NULL) { + hid_err(hdev, "can't alloc magicmouse descriptor\n"); + return -ENOMEM; + } + +- msc->input_ops.raw_event = magicmouse_raw_event_usb; +- msc->input_ops.setup_input = magicmouse_setup_input_usb; ++ // internal trackpad use a data format use input ops to avoid ++ // conflicts with the report ID. 
++ if (id->vendor == SPI_VENDOR_ID_APPLE) { ++ msc->input_ops.raw_event = magicmouse_raw_event_spi; ++ msc->input_ops.setup_input = magicmouse_setup_input_spi; ++ } else { ++ msc->input_ops.raw_event = magicmouse_raw_event_usb; ++ msc->input_ops.setup_input = magicmouse_setup_input_usb; ++ } + + msc->scroll_accel = SCROLL_ACCEL_DEFAULT; + msc->hdev = hdev; +@@ -948,6 +1197,15 @@ static int magicmouse_probe(struct hid_device *hdev, + TRACKPAD2_USB_REPORT_ID, 0); + } + break; ++ case HID_ANY_ID: ++ switch (id->bus) { ++ case BUS_SPI: ++ report = hid_register_report(hdev, HID_INPUT_REPORT, SPI_REPORT_ID, 0); ++ break; ++ default: ++ break; ++ } ++ break; + default: /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ + report = hid_register_report(hdev, HID_INPUT_REPORT, + TRACKPAD_REPORT_ID, 0); +@@ -1055,6 +1313,8 @@ static const struct hid_device_id magic_mice[] = { + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 }, ++ { HID_SPI_DEVICE(SPI_VENDOR_ID_APPLE, HID_ANY_ID), ++ .driver_data = 0 }, + { } + }; + MODULE_DEVICE_TABLE(hid, magic_mice); +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Hector Martin +Date: Fri, 8 Jul 2022 02:12:57 +0900 +Subject: HID: magicmouse: Add MTP multi-touch device support + +Apple M2 devices expose the multi-touch device over the HID over +DockChannel transport, which we represent as the HOST bus type. The +report format is the same, except the legacy mouse header is gone and +there is no enable request needed. 
+ +Signed-off-by: Hector Martin +--- + drivers/hid/hid-magicmouse.c | 63 +++++++--- + 1 file changed, 47 insertions(+), 16 deletions(-) + +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -61,6 +61,7 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie + #define MOUSE2_REPORT_ID 0x12 + #define DOUBLE_REPORT_ID 0xf7 + #define SPI_REPORT_ID 0x02 ++#define MTP_REPORT_ID 0x75 + #define USB_BATTERY_TIMEOUT_SEC 60 + + #define MAX_CONTACTS 16 +@@ -586,25 +587,32 @@ struct tp_finger { + } __attribute__((packed, aligned(2))); + + /** +- * struct trackpad report ++ * vendor trackpad report + * +- * @report_id: reportid +- * @buttons: HID Usage Buttons 3 1-bit reports + * @num_fingers: the number of fingers being reported in @fingers +- * @clicked: same as @buttons ++ * @buttons: same as HID buttons + */ + struct tp_header { ++ // HID vendor part, up to 1751 bytes ++ u8 unknown[22]; ++ u8 num_fingers; ++ u8 buttons; ++ u8 unknown3[14]; ++}; ++ ++/** ++ * standard HID mouse report ++ * ++ * @report_id: reportid ++ * @buttons: HID Usage Buttons 3 1-bit reports ++ */ ++struct tp_mouse_report { + // HID mouse report + u8 report_id; + u8 buttons; + u8 rel_x; + u8 rel_y; + u8 padding[4]; +- // HID vendor part, up to 1751 bytes +- u8 unknown[22]; +- u8 num_fingers; +- u8 clicked; +- u8 unknown3[14]; + }; + + static inline int le16_to_int(__le16 x) +@@ -634,7 +642,7 @@ static void report_finger_data(struct input_dev *input, int slot, + input_report_abs(input, ABS_MT_POSITION_Y, pos->y); + } + +-static int magicmouse_raw_event_spi(struct hid_device *hdev, ++static int magicmouse_raw_event_mtp(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) + { + struct magicmouse_sc *msc = hid_get_drvdata(hdev); +@@ -651,9 +659,6 @@ static int magicmouse_raw_event_spi(struct hid_device *hdev, + // 
print_hex_dump_debug("appleft ev: ", DUMP_PREFIX_OFFSET, 16, 1, data, + // size, false); + +- if (data[0] != SPI_REPORT_ID) +- return 0; +- + /* Expect 46 bytes of prefix, and N * 30 bytes of touch data. */ + if (size < hdr_sz || ((size - hdr_sz) % touch_sz) != 0) + return 0; +@@ -692,12 +697,26 @@ static int magicmouse_raw_event_spi(struct hid_device *hdev, + } + + input_mt_sync_frame(input); +- input_report_key(input, BTN_MOUSE, data[1] & 1); ++ input_report_key(input, BTN_MOUSE, tp_hdr->buttons & 1); + + input_sync(input); + return 1; + } + ++static int magicmouse_raw_event_spi(struct hid_device *hdev, ++ struct hid_report *report, u8 *data, int size) ++{ ++ const size_t hdr_sz = sizeof(struct tp_mouse_report); ++ ++ if (size < hdr_sz) ++ return 0; ++ ++ if (data[0] != SPI_REPORT_ID) ++ return 0; ++ ++ return magicmouse_raw_event_mtp(hdev, report, data + hdr_sz, size - hdr_sz); ++} ++ + static int magicmouse_event(struct hid_device *hdev, struct hid_field *field, + struct hid_usage *usage, __s32 value) + { +@@ -1119,7 +1138,7 @@ static int magicmouse_probe(struct hid_device *hdev, + struct hid_report *report; + int ret; + +- if (id->bus == BUS_SPI && id->vendor == SPI_VENDOR_ID_APPLE && ++ if ((id->bus == BUS_SPI || id->bus == BUS_HOST) && id->vendor == SPI_VENDOR_ID_APPLE && + hdev->type != HID_TYPE_SPI_MOUSE) + return -ENODEV; + +@@ -1131,7 +1150,10 @@ static int magicmouse_probe(struct hid_device *hdev, + + // internal trackpad use a data format use input ops to avoid + // conflicts with the report ID. 
+- if (id->vendor == SPI_VENDOR_ID_APPLE) { ++ if (id->bus == BUS_HOST) { ++ msc->input_ops.raw_event = magicmouse_raw_event_mtp; ++ msc->input_ops.setup_input = magicmouse_setup_input_spi; ++ } else if (id->bus == BUS_SPI) { + msc->input_ops.raw_event = magicmouse_raw_event_spi; + msc->input_ops.setup_input = magicmouse_setup_input_spi; + } else { +@@ -1199,6 +1221,9 @@ static int magicmouse_probe(struct hid_device *hdev, + break; + case HID_ANY_ID: + switch (id->bus) { ++ case BUS_HOST: ++ report = hid_register_report(hdev, HID_INPUT_REPORT, MTP_REPORT_ID, 0); ++ break; + case BUS_SPI: + report = hid_register_report(hdev, HID_INPUT_REPORT, SPI_REPORT_ID, 0); + break; +@@ -1220,6 +1245,10 @@ static int magicmouse_probe(struct hid_device *hdev, + } + report->size = 6; + ++ /* MTP devices do not need the MT enable, this is handled by the MTP driver */ ++ if (id->bus == BUS_HOST) ++ return 0; ++ + /* + * Some devices repond with 'invalid report id' when feature + * report switching it into multitouch mode is sent to it. +@@ -1315,6 +1344,8 @@ static const struct hid_device_id magic_mice[] = { + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 }, + { HID_SPI_DEVICE(SPI_VENDOR_ID_APPLE, HID_ANY_ID), + .driver_data = 0 }, ++ { HID_DEVICE(BUS_HOST, HID_GROUP_ANY, HOST_VENDOR_ID_APPLE, ++ HID_ANY_ID), .driver_data = 0 }, + { } + }; + MODULE_DEVICE_TABLE(hid, magic_mice); +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Janne Grunau +Date: Sun, 11 Dec 2022 22:56:16 +0100 +Subject: HID: magicmouse: Add .reset_resume for SPI trackpads + +The trackpad has to request multi touch reports during resume. 
+ +Signed-off-by: Janne Grunau +--- + drivers/hid/hid-magicmouse.c | 14 ++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -1350,6 +1350,16 @@ static const struct hid_device_id magic_mice[] = { + }; + MODULE_DEVICE_TABLE(hid, magic_mice); + ++#ifdef CONFIG_PM ++static int magicmouse_reset_resume(struct hid_device *hdev) ++{ ++ if (hdev->bus == BUS_SPI) ++ return magicmouse_enable_multitouch(hdev); ++ ++ return 0; ++} ++#endif ++ + static struct hid_driver magicmouse_driver = { + .name = "magicmouse", + .id_table = magic_mice, +@@ -1360,6 +1370,10 @@ static struct hid_driver magicmouse_driver = { + .event = magicmouse_event, + .input_mapping = magicmouse_input_mapping, + .input_configured = magicmouse_input_configured, ++#ifdef CONFIG_PM ++ .reset_resume = magicmouse_reset_resume, ++#endif ++ + }; + module_hid_driver(magicmouse_driver); + +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Hector Martin +Date: Sun, 30 Apr 2023 23:48:45 +0900 +Subject: HID: magicmouse: Handle touch controller resets on SPI devices + +On at least some SPI devices (e.g. recent Apple Silicon machines), the +Broadcom touch controller is prone to crashing. When this happens, the +STM eventually notices and resets it. It then notifies the driver via +HID report 0x60, and the driver needs to re-enable MT mode to make +things work again. + +This poses an additional issue: the hidinput core will close the +low-level transport while the device is closed, which can cause us to +miss a reset notification. To fix this, override the input open/close +callbacks and send the MT enable every time the HID device is opened, +instead of only once on probe. 
This should increase general robustness, +even if the reset mechanism doesn't work for some reason, so it's worth +doing it for USB devices too. MTP devices are exempt since they do not +require the MT enable at all. + +Signed-off-by: Hector Martin +--- + drivers/hid/hid-magicmouse.c | 108 ++++++++-- + 1 file changed, 87 insertions(+), 21 deletions(-) + +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -61,6 +61,7 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie + #define MOUSE2_REPORT_ID 0x12 + #define DOUBLE_REPORT_ID 0xf7 + #define SPI_REPORT_ID 0x02 ++#define SPI_RESET_REPORT_ID 0x60 + #define MTP_REPORT_ID 0x75 + #define USB_BATTERY_TIMEOUT_SEC 60 + +@@ -176,6 +177,50 @@ struct magicmouse_sc { + struct magicmouse_input_ops input_ops; + }; + ++static int magicmouse_enable_multitouch(struct hid_device *hdev); ++ ++static int magicmouse_open(struct input_dev *dev) ++{ ++ struct hid_device *hdev = input_get_drvdata(dev); ++ struct magicmouse_sc *msc = hid_get_drvdata(hdev); ++ int ret; ++ ++ ret = hid_hw_open(hdev); ++ if (ret) ++ return ret; ++ ++ /* ++ * Some devices repond with 'invalid report id' when feature ++ * report switching it into multitouch mode is sent to it. ++ * ++ * This results in -EIO from the _raw low-level transport callback, ++ * but there seems to be no other way of switching the mode. ++ * Thus the super-ugly hacky success check below. 
++ */ ++ ret = magicmouse_enable_multitouch(hdev); ++ if (ret != -EIO && ret < 0) { ++ hid_err(hdev, "unable to request touch data (%d)\n", ret); ++ return ret; ++ } ++ if (ret == -EIO && (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 || ++ hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC)) { ++ schedule_delayed_work(&msc->work, msecs_to_jiffies(500)); ++ } ++ ++ /* ++ * MT enable is usually not required after the first time, so don't ++ * consider it fatal. ++ */ ++ return 0; ++} ++ ++static void magicmouse_close(struct input_dev *dev) ++{ ++ struct hid_device *hdev = input_get_drvdata(dev); ++ ++ hid_hw_close(hdev); ++} ++ + static int magicmouse_firm_touch(struct magicmouse_sc *msc) + { + int touch = -1; +@@ -706,12 +751,19 @@ static int magicmouse_raw_event_mtp(struct hid_device *hdev, + static int magicmouse_raw_event_spi(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) + { ++ struct magicmouse_sc *msc = hid_get_drvdata(hdev); + const size_t hdr_sz = sizeof(struct tp_mouse_report); + +- if (size < hdr_sz) ++ if (!size) + return 0; + +- if (data[0] != SPI_REPORT_ID) ++ if (data[0] == SPI_RESET_REPORT_ID) { ++ hid_info(hdev, "Touch controller was reset, re-enabling touch mode\n"); ++ schedule_delayed_work(&msc->work, msecs_to_jiffies(10)); ++ return 1; ++ } ++ ++ if (data[0] != SPI_REPORT_ID || size < hdr_sz) + return 0; + + return magicmouse_raw_event_mtp(hdev, report, data + hdr_sz, size - hdr_sz); +@@ -904,10 +956,17 @@ static int magicmouse_setup_input_usb(struct input_dev *input, + */ + __clear_bit(EV_REP, input->evbit); + ++ /* ++ * This isn't strictly speaking needed for USB, but enabling MT on ++ * device open is probably more robust than only doing it once on probe ++ * even if USB devices are not known to suffer from the SPI reset issue. 
++ */ ++ input->open = magicmouse_open; ++ input->close = magicmouse_close; + return 0; + } + +-static int magicmouse_setup_input_spi(struct input_dev *input, ++static int magicmouse_setup_input_mtp(struct input_dev *input, + struct hid_device *hdev) + { + int error; +@@ -980,6 +1039,25 @@ static int magicmouse_setup_input_spi(struct input_dev *input, + return 0; + } + ++static int magicmouse_setup_input_spi(struct input_dev *input, ++ struct hid_device *hdev) ++{ ++ int ret = magicmouse_setup_input_mtp(input, hdev); ++ if (ret) ++ return ret; ++ ++ /* ++ * Override the default input->open function to send the MT ++ * enable every time the device is opened. This ensures it works ++ * even if we missed a reset event due to the device being closed. ++ * input->close is overridden for symmetry. ++ */ ++ input->open = magicmouse_open; ++ input->close = magicmouse_close; ++ ++ return 0; ++} ++ + static int magicmouse_input_mapping(struct hid_device *hdev, + struct hid_input *hi, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max) +@@ -1041,7 +1119,7 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev) + feature_size = sizeof(feature_mt_trackpad2_bt); + feature = feature_mt_trackpad2_bt; + break; +- default: /* USB_VENDOR_ID_APPLE || SPI_VENDOR_ID_APPLE */ ++ default: /* USB_VENDOR_ID_APPLE || SPI_VENDOR_ID_APPLE */ + feature_size = sizeof(feature_mt_trackpad2_usb); + feature = feature_mt_trackpad2_usb; + } +@@ -1152,7 +1230,7 @@ static int magicmouse_probe(struct hid_device *hdev, + // conflicts with the report ID. 
+ if (id->bus == BUS_HOST) { + msc->input_ops.raw_event = magicmouse_raw_event_mtp; +- msc->input_ops.setup_input = magicmouse_setup_input_spi; ++ msc->input_ops.setup_input = magicmouse_setup_input_mtp; + } else if (id->bus == BUS_SPI) { + msc->input_ops.raw_event = magicmouse_raw_event_spi; + msc->input_ops.setup_input = magicmouse_setup_input_spi; +@@ -1249,22 +1327,10 @@ static int magicmouse_probe(struct hid_device *hdev, + if (id->bus == BUS_HOST) + return 0; + +- /* +- * Some devices repond with 'invalid report id' when feature +- * report switching it into multitouch mode is sent to it. +- * +- * This results in -EIO from the _raw low-level transport callback, +- * but there seems to be no other way of switching the mode. +- * Thus the super-ugly hacky success check below. +- */ +- ret = magicmouse_enable_multitouch(hdev); +- if (ret != -EIO && ret < 0) { +- hid_err(hdev, "unable to request touch data (%d)\n", ret); +- goto err_stop_hw; +- } +- if (ret == -EIO && (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 || +- id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC)) { +- schedule_delayed_work(&msc->work, msecs_to_jiffies(500)); ++ /* SPI devices need to watch for reset events to re-send the MT enable */ ++ if (id->bus == BUS_SPI) { ++ report = hid_register_report(hdev, HID_INPUT_REPORT, SPI_RESET_REPORT_ID, 0); ++ report->size = 2; + } + + return 0; +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Hector Martin +Date: Sun, 3 Dec 2023 21:08:17 +0900 +Subject: HID: magicmouse: Query device dimensions via HID report + +For SPI/MTP trackpads, query the dimensions via HID report instead of +hardcoding values. + +TODO: Does this work for the USB/BT devices? Maybe we can get rid of the +hardcoded sizes everywhere? 
+ +Signed-off-by: Hector Martin +--- + drivers/hid/hid-magicmouse.c | 104 +++++++--- + 1 file changed, 80 insertions(+), 24 deletions(-) + +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -63,6 +63,7 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie + #define SPI_REPORT_ID 0x02 + #define SPI_RESET_REPORT_ID 0x60 + #define MTP_REPORT_ID 0x75 ++#define SENSOR_DIMENSIONS_REPORT_ID 0xd9 + #define USB_BATTERY_TIMEOUT_SEC 60 + + #define MAX_CONTACTS 16 +@@ -117,6 +118,7 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie + #define TRACKPAD2_RES_Y \ + ((TRACKPAD2_MAX_Y - TRACKPAD2_MIN_Y) / (TRACKPAD2_DIMENSION_Y / 100)) + ++/* These are fallback values, since the real values will be queried from the device. */ + #define J314_TP_DIMENSION_X (float)13000 + #define J314_TP_MIN_X -5900 + #define J314_TP_MAX_X 6500 +@@ -140,6 +142,7 @@ struct magicmouse_input_ops { + * struct magicmouse_sc - Tracks Magic Mouse-specific data. + * @input: Input device through which we report events. + * @quirks: Currently unused. ++ * @query_dimensions: Whether to query and update dimensions on first open + * @ntouches: Number of touches in most recent touch report. + * @scroll_accel: Number of consecutive scroll motions. + * @scroll_jiffies: Time of last scroll motion. 
+@@ -154,6 +157,7 @@ struct magicmouse_input_ops { + struct magicmouse_sc { + struct input_dev *input; + unsigned long quirks; ++ bool query_dimensions; + + int ntouches; + int scroll_accel; +@@ -179,6 +183,11 @@ struct magicmouse_sc { + + static int magicmouse_enable_multitouch(struct hid_device *hdev); + ++static inline int le16_to_int(__le16 x) ++{ ++ return (signed short)le16_to_cpu(x); ++} ++ + static int magicmouse_open(struct input_dev *dev) + { + struct hid_device *hdev = input_get_drvdata(dev); +@@ -196,21 +205,69 @@ static int magicmouse_open(struct input_dev *dev) + * This results in -EIO from the _raw low-level transport callback, + * but there seems to be no other way of switching the mode. + * Thus the super-ugly hacky success check below. ++ * ++ * MTP devices do not need this. + */ +- ret = magicmouse_enable_multitouch(hdev); +- if (ret != -EIO && ret < 0) { +- hid_err(hdev, "unable to request touch data (%d)\n", ret); +- return ret; +- } +- if (ret == -EIO && (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 || +- hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC)) { +- schedule_delayed_work(&msc->work, msecs_to_jiffies(500)); ++ if (hdev->bus != BUS_HOST) { ++ ret = magicmouse_enable_multitouch(hdev); ++ if (ret != -EIO && ret < 0) { ++ hid_err(hdev, "unable to request touch data (%d)\n", ret); ++ return ret; ++ } ++ if (ret == -EIO && (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 || ++ hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC)) { ++ schedule_delayed_work(&msc->work, msecs_to_jiffies(500)); ++ } + } + + /* +- * MT enable is usually not required after the first time, so don't +- * consider it fatal. ++ * For Apple Silicon trackpads, we want to query the dimensions on ++ * device open. This is because doing so requires the firmware, but ++ * we don't want to force a firmware load until the device is opened ++ * for the first time. So do that here and update the input properties ++ * just in time before userspace queries them. 
+ */ ++ if (msc->query_dimensions) { ++ struct input_dev *input = msc->input; ++ u8 buf[32]; ++ struct { ++ __le32 width; ++ __le32 height; ++ __le16 min_x; ++ __le16 min_y; ++ __le16 max_x; ++ __le16 max_y; ++ } dim; ++ uint32_t x_span, y_span; ++ ++ ret = hid_hw_raw_request(hdev, SENSOR_DIMENSIONS_REPORT_ID, buf, sizeof(buf), HID_FEATURE_REPORT, HID_REQ_GET_REPORT); ++ if (ret < (int)(1 + sizeof(dim))) { ++ hid_err(hdev, "unable to request dimensions (%d)\n", ret); ++ return ret; ++ } ++ ++ memcpy(&dim, buf + 1, sizeof(dim)); ++ ++ /* finger position */ ++ input_set_abs_params(input, ABS_MT_POSITION_X, ++ le16_to_int(dim.min_x), le16_to_int(dim.max_x), 0, 0); ++ /* Y axis is inverted */ ++ input_set_abs_params(input, ABS_MT_POSITION_Y, ++ -le16_to_int(dim.max_y), -le16_to_int(dim.min_y), 0, 0); ++ x_span = le16_to_int(dim.max_x) - le16_to_int(dim.min_x); ++ y_span = le16_to_int(dim.max_y) - le16_to_int(dim.min_y); ++ ++ /* X/Y resolution */ ++ input_abs_set_res(input, ABS_MT_POSITION_X, 100 * x_span / le32_to_cpu(dim.width) ); ++ input_abs_set_res(input, ABS_MT_POSITION_Y, 100 * y_span / le32_to_cpu(dim.height) ); ++ ++ /* copy info, as input_mt_init_slots() does */ ++ dev->absinfo[ABS_X] = dev->absinfo[ABS_MT_POSITION_X]; ++ dev->absinfo[ABS_Y] = dev->absinfo[ABS_MT_POSITION_Y]; ++ ++ msc->query_dimensions = false; ++ } ++ + return 0; + } + +@@ -660,11 +717,6 @@ struct tp_mouse_report { + u8 padding[4]; + }; + +-static inline int le16_to_int(__le16 x) +-{ +- return (signed short)le16_to_cpu(x); +-} +- + static void report_finger_data(struct input_dev *input, int slot, + const struct input_mt_pos *pos, + const struct tp_finger *f) +@@ -971,6 +1023,7 @@ static int magicmouse_setup_input_mtp(struct input_dev *input, + { + int error; + int mt_flags = 0; ++ struct magicmouse_sc *msc = hid_get_drvdata(hdev); + + __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); + __clear_bit(BTN_0, input->keybit); +@@ -1036,6 +1089,18 @@ static int magicmouse_setup_input_mtp(struct 
input_dev *input, + if (error) + return error; + ++ /* ++ * Override the default input->open function to send the MT ++ * enable every time the device is opened. This ensures it works ++ * even if we missed a reset event due to the device being closed. ++ * input->close is overridden for symmetry. ++ * ++ * This also takes care of the dimensions query. ++ */ ++ input->open = magicmouse_open; ++ input->close = magicmouse_close; ++ msc->query_dimensions = true; ++ + return 0; + } + +@@ -1046,15 +1111,6 @@ static int magicmouse_setup_input_spi(struct input_dev *input, + if (ret) + return ret; + +- /* +- * Override the default input->open function to send the MT +- * enable every time the device is opened. This ensures it works +- * even if we missed a reset event due to the device being closed. +- * input->close is overridden for symmetry. +- */ +- input->open = magicmouse_open; +- input->close = magicmouse_close; +- + return 0; + } + +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Janne Grunau +Date: Fri, 10 Dec 2021 19:38:43 +0100 +Subject: WIP: HID: transport: spi: add Apple SPI transport + +Keyboard and trackpad of Apple Sillicon SoCs (M1, M1 Pro/Max) laptops +are are HID devices connected via SPI. + +This is the same protocol as implemented by applespi.c. It was not +noticed that protocol is a transport for HID. Adding support for ACPI +based Intel MacBooks will be done in a separate commit. + +How HID is mapped in this protocol is not yet fully understood. + +Microsoft has a specification for HID over SPI [1] incompatible with the +transport protocol used by Apple. + +[1] https://docs.microsoft.com/en-us/windows-hardware/drivers/hid/hid-over-spi + +Contains "HID: transport: spi: apple: Increase receive buffer size" + +The SPI receive buffer is passed directly to hid_input_report() if it +contains a complete report. 
It is then passed to hid_report_raw_event() +which computes the expected report size and memsets the "missing +trailing data up to HID_MAX_BUFFER_SIZE (16K) or +hid_ll_driver.max_buffer_size (if set) to zero. + +Co-developed-by: Hector Martin +Signed-off-by: Hector Martin +Signed-off-by: Janne Grunau +--- + drivers/hid/Kconfig | 2 + + drivers/hid/Makefile | 2 + + drivers/hid/spi-hid/Kconfig | 26 + + drivers/hid/spi-hid/Makefile | 10 + + drivers/hid/spi-hid/spi-hid-apple-core.c | 1194 ++++++++++ + drivers/hid/spi-hid/spi-hid-apple-of.c | 153 ++ + drivers/hid/spi-hid/spi-hid-apple.h | 35 + + 7 files changed, 1422 insertions(+) + +diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig +index 111111111111..222222222222 100644 +--- a/drivers/hid/Kconfig ++++ b/drivers/hid/Kconfig +@@ -1450,4 +1450,6 @@ endif # HID + + source "drivers/hid/usbhid/Kconfig" + ++source "drivers/hid/spi-hid/Kconfig" ++ + endif # HID_SUPPORT +diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile +index 111111111111..222222222222 100644 +--- a/drivers/hid/Makefile ++++ b/drivers/hid/Makefile +@@ -173,6 +173,8 @@ obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/ + + obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/ + ++obj-$(CONFIG_SPI_HID_APPLE_CORE) += spi-hid/ ++ + obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/ + + obj-$(CONFIG_INTEL_THC_HID) += intel-thc-hid/ +diff --git a/drivers/hid/spi-hid/Kconfig b/drivers/hid/spi-hid/Kconfig +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/hid/spi-hid/Kconfig +@@ -0,0 +1,26 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++menu "SPI HID support" ++ depends on SPI ++ ++config SPI_HID_APPLE_OF ++ tristate "HID over SPI transport layer for Apple Silicon SoCs" ++ default ARCH_APPLE ++ depends on SPI && INPUT && OF ++ help ++ Say Y here if you use Apple Silicon based laptop. The keyboard and ++ touchpad are HID based devices connected via SPI. ++ ++ If unsure, say N. ++ ++ This support is also available as a module. 
If so, the module ++ will be called spi-hid-apple-of. It will also build/depend on the ++ module spi-hid-apple. ++ ++endmenu ++ ++config SPI_HID_APPLE_CORE ++ tristate ++ default y if SPI_HID_APPLE_OF=y ++ default m if SPI_HID_APPLE_OF=m ++ select HID ++ select CRC16 +diff --git a/drivers/hid/spi-hid/Makefile b/drivers/hid/spi-hid/Makefile +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/hid/spi-hid/Makefile +@@ -0,0 +1,10 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++# ++# Makefile for SPI HID tarnsport drivers ++# ++ ++obj-$(CONFIG_SPI_HID_APPLE_CORE) += spi-hid-apple.o ++ ++spi-hid-apple-objs = spi-hid-apple-core.o ++ ++obj-$(CONFIG_SPI_HID_APPLE_OF) += spi-hid-apple-of.o +diff --git a/drivers/hid/spi-hid/spi-hid-apple-core.c b/drivers/hid/spi-hid/spi-hid-apple-core.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/hid/spi-hid/spi-hid-apple-core.c +@@ -0,0 +1,1194 @@ ++/* ++ * SPDX-License-Identifier: GPL-2.0 ++ * ++ * Apple SPI HID transport driver ++ * ++ * Copyright (C) The Asahi Linux Contributors ++ * ++ * Based on: drivers/input/applespi.c ++ * ++ * MacBook (Pro) SPI keyboard and touchpad driver ++ * ++ * Copyright (c) 2015-2018 Federico Lorenzi ++ * Copyright (c) 2017-2018 Ronald Tschalär ++ * ++ */ ++ ++//#define DEBUG 2 ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "spi-hid-apple.h" ++ ++#define SPIHID_DEF_WAIT msecs_to_jiffies(1000) ++ ++#define SPIHID_MAX_INPUT_REPORT_SIZE 0x800 ++ ++/* support only keyboard, trackpad and management dev for now */ ++#define SPIHID_MAX_DEVICES 3 ++ ++#define SPIHID_DEVICE_ID_MNGT 0x0 ++#define SPIHID_DEVICE_ID_KBD 0x1 ++#define SPIHID_DEVICE_ID_TP 0x2 ++#define SPIHID_DEVICE_ID_INFO 0xd0 ++ ++#define SPIHID_READ_PACKET 0x20 ++#define SPIHID_WRITE_PACKET 0x40 ++ ++#define SPIHID_DESC_MAX 512 ++ ++#define SPIHID_SET_LEDS 0x0151 /* caps lock */ 
++ ++#define SPI_RW_CHG_DELAY_US 200 /* 'Inter Stage Us'? */ ++ ++static const u8 spi_hid_apple_booted[4] = { 0xa0, 0x80, 0x00, 0x00 }; ++static const u8 spi_hid_apple_status_ok[4] = { 0xac, 0x27, 0x68, 0xd5 }; ++ ++struct spihid_interface { ++ struct hid_device *hid; ++ u8 *hid_desc; ++ u32 hid_desc_len; ++ u32 id; ++ unsigned country; ++ u32 max_control_report_len; ++ u32 max_input_report_len; ++ u32 max_output_report_len; ++ u8 name[32]; ++ u8 reply_buf[SPIHID_DESC_MAX]; ++ u32 reply_len; ++ bool ready; ++}; ++ ++struct spihid_input_report { ++ u8 *buf; ++ u32 length; ++ u32 offset; ++ u8 device; ++ u8 flags; ++}; ++ ++struct spihid_apple { ++ struct spi_device *spidev; ++ ++ struct spihid_apple_ops *ops; ++ ++ struct spihid_interface mngt; ++ struct spihid_interface kbd; ++ struct spihid_interface tp; ++ ++ wait_queue_head_t wait; ++ struct mutex tx_lock; //< protects against concurrent SPI writes ++ ++ struct spi_message rx_msg; ++ struct spi_message tx_msg; ++ struct spi_transfer rx_transfer; ++ struct spi_transfer tx_transfer; ++ struct spi_transfer status_transfer; ++ ++ u8 *rx_buf; ++ u8 *tx_buf; ++ u8 *status_buf; ++ ++ u8 vendor[32]; ++ u8 product[64]; ++ u8 serial[32]; ++ ++ u32 num_devices; ++ ++ u32 vendor_id; ++ u32 product_id; ++ u32 version_number; ++ ++ u8 msg_id; ++ ++ /* fragmented HID report */ ++ struct spihid_input_report report; ++ ++ /* state tracking flags */ ++ bool status_booted; ++ ++#ifdef IRQ_WAKE_SUPPORT ++ bool irq_wake_enabled; ++#endif ++}; ++ ++/** ++ * struct spihid_msg_hdr - common header of protocol messages. ++ * ++ * Each message begins with fixed header, followed by a message-type specific ++ * payload, and ends with a 16-bit crc. Because of the varying lengths of the ++ * payload, the crc is defined at the end of each payload struct, rather than ++ * in this struct. ++ * ++ * @unknown0: request type? output, input (0x10), feature, protocol ++ * @unknown1: maybe report id? 
++ * @unknown2: mostly zero, in info request maybe device num ++ * @id: incremented on each message, rolls over after 255; there is a ++ * separate counter for each message type. ++ * @rsplen: response length (the exact nature of this field is quite ++ * speculative). On a request/write this is often the same as ++ * @length, though in some cases it has been seen to be much larger ++ * (e.g. 0x400); on a response/read this the same as on the ++ * request; for reads that are not responses it is 0. ++ * @length: length of the remainder of the data in the whole message ++ * structure (after re-assembly in case of being split over ++ * multiple spi-packets), minus the trailing crc. The total size ++ * of a message is therefore @length + 10. ++ */ ++ ++struct spihid_msg_hdr { ++ u8 unknown0; ++ u8 unknown1; ++ u8 unknown2; ++ u8 id; ++ __le16 rsplen; ++ __le16 length; ++}; ++ ++/** ++ * struct spihid_transfer_packet - a complete spi packet; always 256 bytes. This carries ++ * the (parts of the) message in the data. But note that this does not ++ * necessarily contain a complete message, as in some cases (e.g. many ++ * fingers pressed) the message is split over multiple packets (see the ++ * @offset, @remain, and @length fields). In general the data parts in ++ * spihid_transfer_packet's are concatenated until @remaining is 0, and the ++ * result is an message. ++ * ++ * @flags: 0x40 = write (to device), 0x20 = read (from device); note that ++ * the response to a write still has 0x40. ++ * @device: 1 = keyboard, 2 = touchpad ++ * @offset: specifies the offset of this packet's data in the complete ++ * message; i.e. 
> 0 indicates this is a continuation packet (in ++ * the second packet for a message split over multiple packets ++ * this would then be the same as the @length in the first packet) ++ * @remain: number of message bytes remaining in subsequents packets (in ++ * the first packet of a message split over two packets this would ++ * then be the same as the @length in the second packet) ++ * @length: length of the valid data in the @data in this packet ++ * @data: all or part of a message ++ * @crc16: crc over this whole structure minus this @crc16 field. This ++ * covers just this packet, even on multi-packet messages (in ++ * contrast to the crc in the message). ++ */ ++struct spihid_transfer_packet { ++ u8 flags; ++ u8 device; ++ __le16 offset; ++ __le16 remain; ++ __le16 length; ++ u8 data[246]; ++ __le16 crc16; ++}; ++ ++/* ++ * how HID is mapped onto the protocol is not fully clear. This are the known ++ * reports/request: ++ * ++ * pkt.flags pkt.dev? msg.u0 msg.u1 msg.u2 ++ * info 0x40 0xd0 0x20 0x01 0xd0 ++ * ++ * info mngt: 0x40 0xd0 0x20 0x10 0x00 ++ * info kbd: 0x40 0xd0 0x20 0x10 0x01 ++ * info tp: 0x40 0xd0 0x20 0x10 0x02 ++ * ++ * desc kbd: 0x40 0xd0 0x20 0x10 0x01 ++ * desc trackpad: 0x40 0xd0 0x20 0x10 0x02 ++ * ++ * mt mode: 0x40 0x02 0x52 0x02 0x00 set protocol? 
++ * capslock led 0x40 0x01 0x51 0x01 0x00 output report ++ * ++ * report kbd: 0x20 0x01 0x10 0x01 0x00 input report ++ * report tp: 0x20 0x02 0x10 0x02 0x00 input report ++ * ++ */ ++ ++ ++static int spihid_apple_request(struct spihid_apple *spihid, u8 target, u8 unk0, ++ u8 unk1, u8 unk2, u16 resp_len, u8 *buf, ++ size_t len) ++{ ++ struct spihid_transfer_packet *pkt; ++ struct spihid_msg_hdr *hdr; ++ u16 crc; ++ int err; ++ ++ /* know reports are small enoug to fit in a single packet */ ++ if (len > sizeof(pkt->data) - sizeof(*hdr) - sizeof(__le16)) ++ return -EINVAL; ++ ++ err = mutex_lock_interruptible(&spihid->tx_lock); ++ if (err < 0) ++ return err; ++ ++ pkt = (struct spihid_transfer_packet *)spihid->tx_buf; ++ ++ memset(pkt, 0, sizeof(*pkt)); ++ pkt->flags = SPIHID_WRITE_PACKET; ++ pkt->device = target; ++ pkt->length = cpu_to_le16(sizeof(*hdr) + len + sizeof(__le16)); ++ ++ hdr = (struct spihid_msg_hdr *)&pkt->data[0]; ++ hdr->unknown0 = unk0; ++ hdr->unknown1 = unk1; ++ hdr->unknown2 = unk2; ++ hdr->id = spihid->msg_id++; ++ hdr->rsplen = cpu_to_le16(resp_len); ++ hdr->length = cpu_to_le16(len); ++ ++ if (len) ++ memcpy(pkt->data + sizeof(*hdr), buf, len); ++ crc = crc16(0, &pkt->data[0], sizeof(*hdr) + len); ++ put_unaligned_le16(crc, pkt->data + sizeof(*hdr) + len); ++ ++ pkt->crc16 = cpu_to_le16(crc16(0, spihid->tx_buf, ++ offsetof(struct spihid_transfer_packet, crc16))); ++ ++ memset(spihid->status_buf, 0, sizeof(spi_hid_apple_status_ok)); ++ ++ err = spi_sync(spihid->spidev, &spihid->tx_msg); ++ ++ if (memcmp(spihid->status_buf, spi_hid_apple_status_ok, ++ sizeof(spi_hid_apple_status_ok))) { ++ u8 *b = spihid->status_buf; ++ dev_warn_ratelimited(&spihid->spidev->dev, "status message " ++ "mismatch: %02x %02x %02x %02x\n", ++ b[0], b[1], b[2], b[3]); ++ } ++ mutex_unlock(&spihid->tx_lock); ++ if (err < 0) ++ return err; ++ ++ return (int)len; ++} ++ ++static struct spihid_apple *spihid_get_data(struct spihid_interface *idev) ++{ ++ switch (idev->id) 
{ ++ case SPIHID_DEVICE_ID_KBD: ++ return container_of(idev, struct spihid_apple, kbd); ++ case SPIHID_DEVICE_ID_TP: ++ return container_of(idev, struct spihid_apple, tp); ++ default: ++ return NULL; ++ } ++} ++ ++static int apple_ll_start(struct hid_device *hdev) ++{ ++ /* no-op SPI transport is already setup */ ++ return 0; ++}; ++ ++static void apple_ll_stop(struct hid_device *hdev) ++{ ++ /* no-op, devices will be desstroyed on driver destruction */ ++} ++ ++static int apple_ll_open(struct hid_device *hdev) ++{ ++ struct spihid_apple *spihid; ++ struct spihid_interface *idev = hdev->driver_data; ++ ++ if (idev->hid_desc_len == 0) { ++ spihid = spihid_get_data(idev); ++ dev_warn(&spihid->spidev->dev, ++ "HID descriptor missing for dev %u", idev->id); ++ } else ++ idev->ready = true; ++ ++ return 0; ++} ++ ++static void apple_ll_close(struct hid_device *hdev) ++{ ++ struct spihid_interface *idev = hdev->driver_data; ++ idev->ready = false; ++} ++ ++static int apple_ll_parse(struct hid_device *hdev) ++{ ++ struct spihid_interface *idev = hdev->driver_data; ++ ++ return hid_parse_report(hdev, idev->hid_desc, idev->hid_desc_len); ++} ++ ++static int apple_ll_raw_request(struct hid_device *hdev, ++ unsigned char reportnum, __u8 *buf, size_t len, ++ unsigned char rtype, int reqtype) ++{ ++ struct spihid_interface *idev = hdev->driver_data; ++ struct spihid_apple *spihid = spihid_get_data(idev); ++ int ret; ++ ++ dev_dbg(&spihid->spidev->dev, ++ "apple_ll_raw_request: device:%u reportnum:%hhu rtype:%hhu", ++ idev->id, reportnum, rtype); ++ ++ switch (reqtype) { ++ case HID_REQ_GET_REPORT: ++ if (rtype != HID_FEATURE_REPORT) ++ return -EINVAL; ++ ++ idev->reply_len = 0; ++ ret = spihid_apple_request(spihid, idev->id, 0x32, reportnum, 0x00, len, NULL, 0); ++ if (ret < 0) ++ return ret; ++ ++ ret = wait_event_interruptible_timeout(spihid->wait, idev->reply_len, ++ SPIHID_DEF_WAIT); ++ if (ret == 0) ++ ret = -ETIMEDOUT; ++ if (ret < 0) { ++ dev_err(&spihid->spidev->dev, 
"waiting for get report failed: %d", ret); ++ return ret; ++ } ++ memcpy(buf, idev->reply_buf, max_t(size_t, len, idev->reply_len)); ++ return idev->reply_len; ++ ++ case HID_REQ_SET_REPORT: ++ if (buf[0] != reportnum) ++ return -EINVAL; ++ if (reportnum != idev->id) { ++ dev_warn(&spihid->spidev->dev, ++ "device:%u reportnum:" ++ "%hhu mismatch", ++ idev->id, reportnum); ++ return -EINVAL; ++ } ++ return spihid_apple_request(spihid, idev->id, 0x52, reportnum, 0x00, 2, buf, len); ++ default: ++ return -EIO; ++ } ++} ++ ++static int apple_ll_output_report(struct hid_device *hdev, __u8 *buf, ++ size_t len) ++{ ++ struct spihid_interface *idev = hdev->driver_data; ++ struct spihid_apple *spihid = spihid_get_data(idev); ++ if (!spihid) ++ return -1; ++ ++ dev_dbg(&spihid->spidev->dev, ++ "apple_ll_output_report: device:%u len:%zu:", ++ idev->id, len); ++ // second idev->id should maybe be buf[0]? ++ return spihid_apple_request(spihid, idev->id, 0x51, idev->id, 0x00, 0, buf, len); ++} ++ ++static struct hid_ll_driver apple_hid_ll = { ++ .start = &apple_ll_start, ++ .stop = &apple_ll_stop, ++ .open = &apple_ll_open, ++ .close = &apple_ll_close, ++ .parse = &apple_ll_parse, ++ .raw_request = &apple_ll_raw_request, ++ .output_report = &apple_ll_output_report, ++ .max_buffer_size = SPIHID_MAX_INPUT_REPORT_SIZE, ++}; ++ ++static struct spihid_interface *spihid_get_iface(struct spihid_apple *spihid, ++ u32 iface) ++{ ++ switch (iface) { ++ case SPIHID_DEVICE_ID_MNGT: ++ return &spihid->mngt; ++ case SPIHID_DEVICE_ID_KBD: ++ return &spihid->kbd; ++ case SPIHID_DEVICE_ID_TP: ++ return &spihid->tp; ++ default: ++ return NULL; ++ } ++} ++ ++static int spihid_verify_msg(struct spihid_apple *spihid, u8 *buf, size_t len) ++{ ++ u16 msg_crc, crc; ++ struct device *dev = &spihid->spidev->dev; ++ ++ crc = crc16(0, buf, len - sizeof(__le16)); ++ msg_crc = get_unaligned_le16(buf + len - sizeof(__le16)); ++ if (crc != msg_crc) { ++ dev_warn_ratelimited(dev, "Read message crc mismatch\n"); 
++ return 0; ++ } ++ return 1; ++} ++ ++static bool spihid_status_report(struct spihid_apple *spihid, u8 *pl, ++ size_t len) ++{ ++ struct device *dev = &spihid->spidev->dev; ++ dev_dbg(dev, "%s: len: %zu", __func__, len); ++ if (len == 5 && pl[0] == 0xe0) ++ return true; ++ ++ return false; ++} ++ ++static bool spihid_process_input_report(struct spihid_apple *spihid, u32 device, ++ struct spihid_msg_hdr *hdr, u8 *payload, ++ size_t len) ++{ ++ //dev_dbg(&spihid>spidev->dev, "input report: req:%hx iface:%u ", hdr->unknown0, device); ++ if (hdr->unknown0 != 0x10) ++ return false; ++ ++ /* HID device as well but Vendor usage only, handle it internally for now */ ++ if (device == 0) { ++ if (hdr->unknown1 == 0xe0) { ++ return spihid_status_report(spihid, payload, len); ++ } ++ } else if (device < SPIHID_MAX_DEVICES) { ++ struct spihid_interface *iface = ++ spihid_get_iface(spihid, device); ++ if (iface && iface->hid && iface->ready) { ++ hid_input_report(iface->hid, HID_INPUT_REPORT, payload, ++ len, 1); ++ return true; ++ } ++ } else ++ dev_dbg(&spihid->spidev->dev, ++ "unexpected iface:%u for input report", device); ++ ++ return false; ++} ++ ++struct spihid_device_info { ++ __le16 u0[2]; ++ __le16 num_devices; ++ __le16 vendor_id; ++ __le16 product_id; ++ __le16 version_number; ++ __le16 vendor_str[2]; //< offset and string length ++ __le16 product_str[2]; //< offset and string length ++ __le16 serial_str[2]; //< offset and string length ++}; ++ ++static bool spihid_process_device_info(struct spihid_apple *spihid, u32 iface, ++ u8 *payload, size_t len) ++{ ++ struct device *dev = &spihid->spidev->dev; ++ ++ if (iface != SPIHID_DEVICE_ID_INFO) ++ return false; ++ ++ if (spihid->vendor_id == 0 && ++ len >= sizeof(struct spihid_device_info)) { ++ struct spihid_device_info *info = ++ (struct spihid_device_info *)payload; ++ u16 voff, vlen, poff, plen, soff, slen; ++ u32 num_devices; ++ ++ num_devices = __le16_to_cpu(info->num_devices); ++ ++ if (num_devices < 
SPIHID_MAX_DEVICES) { ++ dev_err(dev, ++ "Device info reports %u devices, expecting at least 3", ++ num_devices); ++ return false; ++ } ++ spihid->num_devices = num_devices; ++ ++ if (spihid->num_devices > SPIHID_MAX_DEVICES) { ++ dev_info( ++ dev, ++ "limiting the number of devices to mngt, kbd and mouse"); ++ spihid->num_devices = SPIHID_MAX_DEVICES; ++ } ++ ++ spihid->vendor_id = __le16_to_cpu(info->vendor_id); ++ spihid->product_id = __le16_to_cpu(info->product_id); ++ spihid->version_number = __le16_to_cpu(info->version_number); ++ ++ voff = __le16_to_cpu(info->vendor_str[0]); ++ vlen = __le16_to_cpu(info->vendor_str[1]); ++ ++ if (voff < len && vlen <= len - voff && ++ vlen < sizeof(spihid->vendor)) { ++ memcpy(spihid->vendor, payload + voff, vlen); ++ spihid->vendor[vlen] = '\0'; ++ } ++ ++ poff = __le16_to_cpu(info->product_str[0]); ++ plen = __le16_to_cpu(info->product_str[1]); ++ ++ if (poff < len && plen <= len - poff && ++ plen < sizeof(spihid->product)) { ++ memcpy(spihid->product, payload + poff, plen); ++ spihid->product[plen] = '\0'; ++ } ++ ++ soff = __le16_to_cpu(info->serial_str[0]); ++ slen = __le16_to_cpu(info->serial_str[1]); ++ ++ if (soff < len && slen <= len - soff && ++ slen < sizeof(spihid->serial)) { ++ memcpy(spihid->vendor, payload + soff, slen); ++ spihid->serial[slen] = '\0'; ++ } ++ ++ wake_up_interruptible(&spihid->wait); ++ } ++ return true; ++} ++ ++struct spihid_iface_info { ++ u8 u_0; ++ u8 interface_num; ++ u8 u_2; ++ u8 u_3; ++ u8 u_4; ++ u8 country_code; ++ __le16 max_input_report_len; ++ __le16 max_output_report_len; ++ __le16 max_control_report_len; ++ __le16 name_offset; ++ __le16 name_length; ++}; ++ ++static bool spihid_process_iface_info(struct spihid_apple *spihid, u32 num, ++ u8 *payload, size_t len) ++{ ++ struct spihid_iface_info *info; ++ struct spihid_interface *iface = spihid_get_iface(spihid, num); ++ u32 name_off, name_len; ++ ++ if (!iface) ++ return false; ++ ++ if (!iface->max_input_report_len) { ++ if (len 
< sizeof(*info)) ++ return false; ++ ++ info = (struct spihid_iface_info *)payload; ++ ++ iface->max_input_report_len = ++ le16_to_cpu(info->max_input_report_len); ++ iface->max_output_report_len = ++ le16_to_cpu(info->max_output_report_len); ++ iface->max_control_report_len = ++ le16_to_cpu(info->max_control_report_len); ++ iface->country = info->country_code; ++ ++ name_off = le16_to_cpu(info->name_offset); ++ name_len = le16_to_cpu(info->name_length); ++ ++ if (name_off < len && name_len <= len - name_off && ++ name_len < sizeof(iface->name)) { ++ memcpy(iface->name, payload + name_off, name_len); ++ iface->name[name_len] = '\0'; ++ } ++ ++ dev_dbg(&spihid->spidev->dev, "Info for %s, country code: 0x%x", ++ iface->name, iface->country); ++ ++ wake_up_interruptible(&spihid->wait); ++ } ++ ++ return true; ++} ++ ++static int spihid_register_hid_device(struct spihid_apple *spihid, ++ struct spihid_interface *idev, u8 device); ++ ++static bool spihid_process_iface_hid_report_desc(struct spihid_apple *spihid, ++ u32 num, u8 *payload, ++ size_t len) ++{ ++ struct spihid_interface *iface = spihid_get_iface(spihid, num); ++ ++ if (!iface) ++ return false; ++ ++ if (iface->hid_desc_len == 0) { ++ if (len > SPIHID_DESC_MAX) ++ return false; ++ memcpy(iface->hid_desc, payload, len); ++ iface->hid_desc_len = len; ++ ++ /* do not register the mngt iface as HID device */ ++ if (num > 0) ++ spihid_register_hid_device(spihid, iface, num); ++ ++ wake_up_interruptible(&spihid->wait); ++ } ++ return true; ++} ++ ++static bool spihid_process_iface_get_report(struct spihid_apple *spihid, ++ u32 device, u8 report, ++ u8 *payload, size_t len) ++{ ++ struct spihid_interface *iface = spihid_get_iface(spihid, device); ++ ++ if (!iface) ++ return false; ++ ++ if (len > sizeof(iface->reply_buf) || len < 1) ++ return false; ++ ++ memcpy(iface->reply_buf, payload, len); ++ iface->reply_len = len; ++ ++ wake_up_interruptible(&spihid->wait); ++ ++ return true; ++} ++ ++static bool 
spihid_process_response(struct spihid_apple *spihid, u32 device, ++ struct spihid_msg_hdr *hdr, u8 *payload, ++ size_t len) ++{ ++ if (hdr->unknown0 == 0x20) { ++ switch (hdr->unknown1) { ++ case 0x01: ++ return spihid_process_device_info(spihid, hdr->unknown2, ++ payload, len); ++ case 0x02: ++ return spihid_process_iface_info(spihid, hdr->unknown2, ++ payload, len); ++ case 0x10: ++ return spihid_process_iface_hid_report_desc( ++ spihid, hdr->unknown2, payload, len); ++ default: ++ break; ++ } ++ } ++ ++ if (hdr->unknown0 == 0x32) { ++ return spihid_process_iface_get_report(spihid, device, hdr->unknown1, payload, len); ++ } ++ ++ return false; ++} ++ ++static void spihid_process_message(struct spihid_apple *spihid, u8 *data, ++ size_t length, u8 device, u8 flags) ++{ ++ struct device *dev = &spihid->spidev->dev; ++ struct spihid_msg_hdr *hdr; ++ bool handled = false; ++ size_t payload_len; ++ u8 *payload; ++ ++ if (!spihid_verify_msg(spihid, data, length)) ++ return; ++ ++ hdr = (struct spihid_msg_hdr *)data; ++ payload_len = le16_to_cpu(hdr->length); ++ ++ if (payload_len == 0 || ++ (payload_len + sizeof(struct spihid_msg_hdr) + 2) > length) ++ return; ++ ++ payload = data + sizeof(struct spihid_msg_hdr); ++ ++ switch (flags) { ++ case SPIHID_READ_PACKET: ++ handled = spihid_process_input_report(spihid, device, hdr, ++ payload, payload_len); ++ break; ++ case SPIHID_WRITE_PACKET: ++ handled = spihid_process_response(spihid, device, hdr, payload, ++ payload_len); ++ break; ++ default: ++ break; ++ } ++ ++#if defined(DEBUG) && DEBUG > 1 ++ { ++ dev_dbg(dev, ++ "R msg: req:%02hhx rep:%02hhx dev:%02hhx id:%hu len:%hu\n", ++ hdr->unknown0, hdr->unknown1, hdr->unknown2, hdr->id, ++ hdr->length); ++ print_hex_dump_debug("spihid msg: ", DUMP_PREFIX_OFFSET, 16, 1, ++ payload, le16_to_cpu(hdr->length), true); ++ } ++#else ++ if (!handled) { ++ dev_dbg(dev, ++ "R unhandled msg: req:%02hhx rep:%02hhx dev:%02hhx id:%hu len:%hu\n", ++ hdr->unknown0, hdr->unknown1, 
hdr->unknown2, hdr->id, ++ hdr->length); ++ print_hex_dump_debug("spihid msg: ", DUMP_PREFIX_OFFSET, 16, 1, ++ payload, le16_to_cpu(hdr->length), true); ++ } ++#endif ++} ++ ++static void spihid_assemble_message(struct spihid_apple *spihid, ++ struct spihid_transfer_packet *pkt) ++{ ++ size_t length, offset, remain; ++ struct device *dev = &spihid->spidev->dev; ++ struct spihid_input_report *rep = &spihid->report; ++ ++ length = le16_to_cpu(pkt->length); ++ remain = le16_to_cpu(pkt->remain); ++ offset = le16_to_cpu(pkt->offset); ++ ++ if (offset + length + remain > U16_MAX) { ++ return; ++ } ++ ++ if (pkt->device != rep->device || pkt->flags != rep->flags || ++ offset != rep->offset) { ++ rep->device = 0; ++ rep->flags = 0; ++ rep->offset = 0; ++ rep->length = 0; ++ } ++ ++ if (offset == 0) { ++ if (rep->offset != 0) { ++ dev_warn(dev, "incomplete report off:%u len:%u", ++ rep->offset, rep->length); ++ } ++ memcpy(rep->buf, pkt->data, length); ++ rep->offset = length; ++ rep->length = length + remain; ++ rep->device = pkt->device; ++ rep->flags = pkt->flags; ++ } else if (offset == rep->offset) { ++ if (offset + length + remain != rep->length) { ++ dev_warn(dev, "incomplete report off:%u len:%u", ++ rep->offset, rep->length); ++ return; ++ } ++ memcpy(rep->buf + offset, pkt->data, length); ++ rep->offset += length; ++ ++ if (rep->offset == rep->length) { ++ spihid_process_message(spihid, rep->buf, rep->length, ++ rep->device, rep->flags); ++ rep->device = 0; ++ rep->flags = 0; ++ rep->offset = 0; ++ rep->length = 0; ++ } ++ } ++} ++ ++static void spihid_process_read(struct spihid_apple *spihid) ++{ ++ u16 crc; ++ size_t length; ++ struct device *dev = &spihid->spidev->dev; ++ struct spihid_transfer_packet *pkt; ++ ++ pkt = (struct spihid_transfer_packet *)spihid->rx_buf; ++ ++ /* check transfer packet crc */ ++ crc = crc16(0, spihid->rx_buf, ++ offsetof(struct spihid_transfer_packet, crc16)); ++ if (crc != le16_to_cpu(pkt->crc16)) { ++ dev_warn_ratelimited(dev, 
"Read package crc mismatch\n"); ++ return; ++ } ++ ++ length = le16_to_cpu(pkt->length); ++ ++ if (length < sizeof(struct spihid_msg_hdr) + 2) { ++ if (length == sizeof(spi_hid_apple_booted) && ++ !memcmp(pkt->data, spi_hid_apple_booted, length)) { ++ if (!spihid->status_booted) { ++ spihid->status_booted = true; ++ wake_up_interruptible(&spihid->wait); ++ } ++ } else { ++ dev_info(dev, "R short packet: len:%zu\n", length); ++ print_hex_dump(KERN_INFO, "spihid pkt:", ++ DUMP_PREFIX_OFFSET, 16, 1, pkt->data, ++ length, false); ++ } ++ return; ++ } ++ ++#if defined(DEBUG) && DEBUG > 1 ++ dev_dbg(dev, ++ "R pkt: flags:%02hhx dev:%02hhx off:%hu remain:%hu, len:%zu\n", ++ pkt->flags, pkt->device, pkt->offset, pkt->remain, length); ++#if defined(DEBUG) && DEBUG > 2 ++ print_hex_dump_debug("spihid pkt: ", DUMP_PREFIX_OFFSET, 16, 1, ++ spihid->rx_buf, ++ sizeof(struct spihid_transfer_packet), true); ++#endif ++#endif ++ ++ if (length > sizeof(pkt->data)) { ++ dev_warn_ratelimited(dev, "Invalid pkt len:%zu", length); ++ return; ++ } ++ ++ /* short message */ ++ if (pkt->offset == 0 && pkt->remain == 0) { ++ spihid_process_message(spihid, pkt->data, length, pkt->device, ++ pkt->flags); ++ } else { ++ spihid_assemble_message(spihid, pkt); ++ } ++} ++ ++static void spihid_read_packet_sync(struct spihid_apple *spihid) ++{ ++ int err; ++ ++ err = spi_sync(spihid->spidev, &spihid->rx_msg); ++ if (!err) { ++ spihid_process_read(spihid); ++ } else { ++ dev_warn(&spihid->spidev->dev, "RX failed: %d\n", err); ++ } ++} ++ ++irqreturn_t spihid_apple_core_irq(int irq, void *data) ++{ ++ struct spi_device *spi = data; ++ struct spihid_apple *spihid = spi_get_drvdata(spi); ++ ++ spihid_read_packet_sync(spihid); ++ ++ return IRQ_HANDLED; ++} ++EXPORT_SYMBOL_GPL(spihid_apple_core_irq); ++ ++static void spihid_apple_setup_spi_msgs(struct spihid_apple *spihid) ++{ ++ memset(&spihid->rx_transfer, 0, sizeof(spihid->rx_transfer)); ++ ++ spihid->rx_transfer.rx_buf = spihid->rx_buf; ++ 
spihid->rx_transfer.len = sizeof(struct spihid_transfer_packet); ++ ++ spi_message_init(&spihid->rx_msg); ++ spi_message_add_tail(&spihid->rx_transfer, &spihid->rx_msg); ++ ++ memset(&spihid->tx_transfer, 0, sizeof(spihid->rx_transfer)); ++ memset(&spihid->status_transfer, 0, sizeof(spihid->status_transfer)); ++ ++ spihid->tx_transfer.tx_buf = spihid->tx_buf; ++ spihid->tx_transfer.len = sizeof(struct spihid_transfer_packet); ++ spihid->tx_transfer.delay.unit = SPI_DELAY_UNIT_USECS; ++ spihid->tx_transfer.delay.value = SPI_RW_CHG_DELAY_US; ++ ++ spihid->status_transfer.rx_buf = spihid->status_buf; ++ spihid->status_transfer.len = sizeof(spi_hid_apple_status_ok); ++ ++ spi_message_init(&spihid->tx_msg); ++ spi_message_add_tail(&spihid->tx_transfer, &spihid->tx_msg); ++ spi_message_add_tail(&spihid->status_transfer, &spihid->tx_msg); ++} ++ ++static int spihid_apple_setup_spi(struct spihid_apple *spihid) ++{ ++ spihid_apple_setup_spi_msgs(spihid); ++ ++ return spihid->ops->power_on(spihid->ops); ++} ++ ++static int spihid_register_hid_device(struct spihid_apple *spihid, ++ struct spihid_interface *iface, u8 device) ++{ ++ int ret; ++ char *suffix; ++ struct hid_device *hid; ++ ++ iface->id = device; ++ ++ hid = hid_allocate_device(); ++ if (IS_ERR(hid)) ++ return PTR_ERR(hid); ++ ++ /* ++ * Use 'Apple SPI Keyboard' and 'Apple SPI Trackpad' as input device ++ * names. The device names need to be distinct since at least Kwin uses ++ * the tripple Vendor ID, Product ID, Name to identify devices. 
++ */ ++ snprintf(hid->name, sizeof(hid->name), "Apple SPI %s", iface->name); ++ // strip ' / Boot' suffix from the name ++ suffix = strstr(hid->name, " / Boot"); ++ if (suffix) ++ suffix[0] = '\0'; ++ snprintf(hid->phys, sizeof(hid->phys), "%s (%hhx)", ++ dev_name(&spihid->spidev->dev), device); ++ strscpy(hid->uniq, spihid->serial, sizeof(hid->uniq)); ++ ++ hid->ll_driver = &apple_hid_ll; ++ hid->bus = BUS_SPI; ++ hid->vendor = spihid->vendor_id; ++ hid->product = spihid->product_id; ++ hid->version = spihid->version_number; ++ ++ if (device == SPIHID_DEVICE_ID_KBD) ++ hid->type = HID_TYPE_SPI_KEYBOARD; ++ else if (device == SPIHID_DEVICE_ID_TP) ++ hid->type = HID_TYPE_SPI_MOUSE; ++ ++ hid->country = iface->country; ++ hid->dev.parent = &spihid->spidev->dev; ++ hid->driver_data = iface; ++ ++ ret = hid_add_device(hid); ++ if (ret < 0) { ++ hid_destroy_device(hid); ++ dev_warn(&spihid->spidev->dev, ++ "Failed to register hid device %hhu", device); ++ return ret; ++ } ++ ++ iface->hid = hid; ++ ++ return 0; ++} ++ ++static void spihid_destroy_hid_device(struct spihid_interface *iface) ++{ ++ if (iface->hid) { ++ hid_destroy_device(iface->hid); ++ iface->hid = NULL; ++ } ++ iface->ready = false; ++} ++ ++int spihid_apple_core_probe(struct spi_device *spi, struct spihid_apple_ops *ops) ++{ ++ struct device *dev = &spi->dev; ++ struct spihid_apple *spihid; ++ int err, i; ++ ++ if (!ops || !ops->power_on || !ops->power_off || !ops->enable_irq || !ops->disable_irq) ++ return -EINVAL; ++ ++ spihid = devm_kzalloc(dev, sizeof(*spihid), GFP_KERNEL); ++ if (!spihid) ++ return -ENOMEM; ++ ++ spihid->ops = ops; ++ spihid->spidev = spi; ++ ++ // init spi ++ spi_set_drvdata(spi, spihid); ++ ++ /* ++ * allocate SPI buffers ++ * Overallocate the receice buffer since it passed directly into ++ * hid_input_report / hid_report_raw_event. The later expects the buffer ++ * to be HID_MAX_BUFFER_SIZE (16k) or hid_ll_driver.max_buffer_size if ++ * set. 
++ */ ++ spihid->rx_buf = devm_kmalloc( ++ &spi->dev, SPIHID_MAX_INPUT_REPORT_SIZE, GFP_KERNEL); ++ spihid->tx_buf = devm_kmalloc( ++ &spi->dev, sizeof(struct spihid_transfer_packet), GFP_KERNEL); ++ spihid->status_buf = devm_kmalloc( ++ &spi->dev, sizeof(spi_hid_apple_status_ok), GFP_KERNEL); ++ ++ if (!spihid->rx_buf || !spihid->tx_buf || !spihid->status_buf) ++ return -ENOMEM; ++ ++ spihid->report.buf = ++ devm_kmalloc(dev, SPIHID_MAX_INPUT_REPORT_SIZE, GFP_KERNEL); ++ ++ spihid->kbd.hid_desc = devm_kmalloc(dev, SPIHID_DESC_MAX, GFP_KERNEL); ++ spihid->tp.hid_desc = devm_kmalloc(dev, SPIHID_DESC_MAX, GFP_KERNEL); ++ ++ if (!spihid->report.buf || !spihid->kbd.hid_desc || ++ !spihid->tp.hid_desc) ++ return -ENOMEM; ++ ++ init_waitqueue_head(&spihid->wait); ++ ++ mutex_init(&spihid->tx_lock); ++ ++ /* Init spi transfer buffers and power device on */ ++ err = spihid_apple_setup_spi(spihid); ++ if (err < 0) ++ goto error; ++ ++ /* enable HID irq */ ++ spihid->ops->enable_irq(spihid->ops); ++ ++ // wait for boot message ++ err = wait_event_interruptible_timeout(spihid->wait, ++ spihid->status_booted, ++ msecs_to_jiffies(1000)); ++ if (err == 0) ++ err = -ENODEV; ++ if (err < 0) { ++ dev_err(dev, "waiting for device boot failed: %d", err); ++ goto error; ++ } ++ ++ /* request device information */ ++ dev_dbg(dev, "request device info"); ++ spihid_apple_request(spihid, 0xd0, 0x20, 0x01, 0xd0, 0, NULL, 0); ++ err = wait_event_interruptible_timeout(spihid->wait, spihid->vendor_id, ++ SPIHID_DEF_WAIT); ++ if (err == 0) ++ err = -ENODEV; ++ if (err < 0) { ++ dev_err(dev, "waiting for device info failed: %d", err); ++ goto error; ++ } ++ ++ /* request interface information */ ++ for (i = 0; i < spihid->num_devices; i++) { ++ struct spihid_interface *iface = spihid_get_iface(spihid, i); ++ if (!iface) ++ continue; ++ dev_dbg(dev, "request interface info 0x%02x", i); ++ spihid_apple_request(spihid, 0xd0, 0x20, 0x02, i, ++ SPIHID_DESC_MAX, NULL, 0); ++ err = 
wait_event_interruptible_timeout( ++ spihid->wait, iface->max_input_report_len, ++ SPIHID_DEF_WAIT); ++ } ++ ++ /* request HID report descriptors */ ++ for (i = 1; i < spihid->num_devices; i++) { ++ struct spihid_interface *iface = spihid_get_iface(spihid, i); ++ if (!iface) ++ continue; ++ dev_dbg(dev, "request hid report desc 0x%02x", i); ++ spihid_apple_request(spihid, 0xd0, 0x20, 0x10, i, ++ SPIHID_DESC_MAX, NULL, 0); ++ wait_event_interruptible_timeout( ++ spihid->wait, iface->hid_desc_len, SPIHID_DEF_WAIT); ++ } ++ ++ return 0; ++error: ++ return err; ++} ++EXPORT_SYMBOL_GPL(spihid_apple_core_probe); ++ ++void spihid_apple_core_remove(struct spi_device *spi) ++{ ++ struct spihid_apple *spihid = spi_get_drvdata(spi); ++ ++ /* destroy input devices */ ++ ++ spihid_destroy_hid_device(&spihid->tp); ++ spihid_destroy_hid_device(&spihid->kbd); ++ ++ /* disable irq */ ++ spihid->ops->disable_irq(spihid->ops); ++ ++ /* power SPI device down */ ++ spihid->ops->power_off(spihid->ops); ++} ++EXPORT_SYMBOL_GPL(spihid_apple_core_remove); ++ ++void spihid_apple_core_shutdown(struct spi_device *spi) ++{ ++ struct spihid_apple *spihid = spi_get_drvdata(spi); ++ ++ /* disable irq */ ++ spihid->ops->disable_irq(spihid->ops); ++ ++ /* power SPI device down */ ++ spihid->ops->power_off(spihid->ops); ++} ++EXPORT_SYMBOL_GPL(spihid_apple_core_shutdown); ++ ++#ifdef CONFIG_PM_SLEEP ++static int spihid_apple_core_suspend(struct device *dev) ++{ ++ int ret; ++#ifdef IRQ_WAKE_SUPPORT ++ int wake_status; ++#endif ++ struct spihid_apple *spihid = spi_get_drvdata(to_spi_device(dev)); ++ ++ if (spihid->tp.hid) { ++ ret = hid_driver_suspend(spihid->tp.hid, PMSG_SUSPEND); ++ if (ret < 0) ++ return ret; ++ } ++ ++ if (spihid->kbd.hid) { ++ ret = hid_driver_suspend(spihid->kbd.hid, PMSG_SUSPEND); ++ if (ret < 0) { ++ if (spihid->tp.hid) ++ hid_driver_resume(spihid->tp.hid); ++ return ret; ++ } ++ } ++ ++ /* Save some power */ ++ spihid->ops->disable_irq(spihid->ops); ++ ++#ifdef 
IRQ_WAKE_SUPPORT ++ if (device_may_wakeup(dev)) { ++ wake_status = spihid->ops->enable_irq_wake(spihid->ops); ++ if (!wake_status) ++ spihid->irq_wake_enabled = true; ++ else ++ dev_warn(dev, "Failed to enable irq wake: %d\n", ++ wake_status); ++ } else { ++ spihid->ops->power_off(spihid->ops); ++ } ++#else ++ spihid->ops->power_off(spihid->ops); ++#endif ++ ++ return 0; ++} ++ ++static int spihid_apple_core_resume(struct device *dev) ++{ ++ int ret_tp = 0, ret_kbd = 0; ++ struct spihid_apple *spihid = spi_get_drvdata(to_spi_device(dev)); ++#ifdef IRQ_WAKE_SUPPORT ++ int wake_status; ++ ++ if (!device_may_wakeup(dev)) { ++ spihid->ops->power_on(spihid->ops); ++ } else if (spihid->irq_wake_enabled) { ++ wake_status = spihid->ops->disable_irq_wake(spihid->ops); ++ if (!wake_status) ++ spihid->irq_wake_enabled = false; ++ else ++ dev_warn(dev, "Failed to disable irq wake: %d\n", ++ wake_status); ++ } ++#endif ++ ++ spihid->ops->enable_irq(spihid->ops); ++ spihid->ops->power_on(spihid->ops); ++ ++ if (spihid->tp.hid) ++ ret_tp = hid_driver_reset_resume(spihid->tp.hid); ++ if (spihid->kbd.hid) ++ ret_kbd = hid_driver_reset_resume(spihid->kbd.hid); ++ ++ if (ret_tp < 0) ++ return ret_tp; ++ ++ return ret_kbd; ++} ++#endif ++ ++const struct dev_pm_ops spihid_apple_core_pm = { ++ SET_SYSTEM_SLEEP_PM_OPS(spihid_apple_core_suspend, ++ spihid_apple_core_resume) ++}; ++EXPORT_SYMBOL_GPL(spihid_apple_core_pm); ++ ++MODULE_DESCRIPTION("Apple SPI HID transport driver"); ++MODULE_AUTHOR("Janne Grunau "); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/hid/spi-hid/spi-hid-apple-of.c b/drivers/hid/spi-hid/spi-hid-apple-of.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/hid/spi-hid/spi-hid-apple-of.c +@@ -0,0 +1,153 @@ ++/* ++ * SPDX-License-Identifier: GPL-2.0 ++ * ++ * Apple SPI HID transport driver - Open Firmware ++ * ++ * Copyright (C) The Asahi Linux Contributors ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include 
"spi-hid-apple.h" ++ ++ ++struct spihid_apple_of { ++ struct spihid_apple_ops ops; ++ ++ struct gpio_desc *enable_gpio; ++ int irq; ++}; ++ ++static int spihid_apple_of_power_on(struct spihid_apple_ops *ops) ++{ ++ struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops); ++ ++ /* reset the controller on boot */ ++ gpiod_direction_output(sh_of->enable_gpio, 1); ++ msleep(5); ++ gpiod_direction_output(sh_of->enable_gpio, 0); ++ msleep(5); ++ /* turn SPI device on */ ++ gpiod_direction_output(sh_of->enable_gpio, 1); ++ msleep(50); ++ ++ return 0; ++} ++ ++static int spihid_apple_of_power_off(struct spihid_apple_ops *ops) ++{ ++ struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops); ++ ++ /* turn SPI device off */ ++ gpiod_direction_output(sh_of->enable_gpio, 0); ++ ++ return 0; ++} ++ ++static int spihid_apple_of_enable_irq(struct spihid_apple_ops *ops) ++{ ++ struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops); ++ ++ enable_irq(sh_of->irq); ++ ++ return 0; ++} ++ ++static int spihid_apple_of_disable_irq(struct spihid_apple_ops *ops) ++{ ++ struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops); ++ ++ disable_irq(sh_of->irq); ++ ++ return 0; ++} ++ ++static int spihid_apple_of_enable_irq_wake(struct spihid_apple_ops *ops) ++{ ++ struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops); ++ ++ return enable_irq_wake(sh_of->irq); ++} ++ ++static int spihid_apple_of_disable_irq_wake(struct spihid_apple_ops *ops) ++{ ++ struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops); ++ ++ return disable_irq_wake(sh_of->irq); ++} ++ ++static int spihid_apple_of_probe(struct spi_device *spi) ++{ ++ struct device *dev = &spi->dev; ++ struct spihid_apple_of *spihid_of; ++ int err; ++ ++ spihid_of = devm_kzalloc(dev, sizeof(*spihid_of), GFP_KERNEL); ++ if (!spihid_of) ++ return -ENOMEM; ++ ++ spihid_of->ops.power_on = spihid_apple_of_power_on; 
++ spihid_of->ops.power_off = spihid_apple_of_power_off; ++ spihid_of->ops.enable_irq = spihid_apple_of_enable_irq; ++ spihid_of->ops.disable_irq = spihid_apple_of_disable_irq; ++ spihid_of->ops.enable_irq_wake = spihid_apple_of_enable_irq_wake; ++ spihid_of->ops.disable_irq_wake = spihid_apple_of_disable_irq_wake; ++ ++ spihid_of->enable_gpio = devm_gpiod_get_index(dev, "spien", 0, 0); ++ if (IS_ERR(spihid_of->enable_gpio)) { ++ err = PTR_ERR(spihid_of->enable_gpio); ++ dev_err(dev, "failed to get 'spien' gpio pin: %d", err); ++ return err; ++ } ++ ++ spihid_of->irq = of_irq_get(dev->of_node, 0); ++ if (spihid_of->irq < 0) { ++ err = spihid_of->irq; ++ dev_err(dev, "failed to get 'extended-irq': %d", err); ++ return err; ++ } ++ err = devm_request_threaded_irq(dev, spihid_of->irq, NULL, ++ spihid_apple_core_irq, IRQF_ONESHOT | IRQF_NO_AUTOEN, ++ "spi-hid-apple-irq", spi); ++ if (err < 0) { ++ dev_err(dev, "failed to request extended-irq %d: %d", ++ spihid_of->irq, err); ++ return err; ++ } ++ ++ return spihid_apple_core_probe(spi, &spihid_of->ops); ++} ++ ++static const struct of_device_id spihid_apple_of_match[] = { ++ { .compatible = "apple,spi-hid-transport" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, spihid_apple_of_match); ++ ++static struct spi_device_id spihid_apple_of_id[] = { ++ { "spi-hid-transport", 0 }, ++ {} ++}; ++MODULE_DEVICE_TABLE(spi, spihid_apple_of_id); ++ ++static struct spi_driver spihid_apple_of_driver = { ++ .driver = { ++ .name = "spi-hid-apple-of", ++ .pm = &spihid_apple_core_pm, ++ .of_match_table = of_match_ptr(spihid_apple_of_match), ++ }, ++ ++ .id_table = spihid_apple_of_id, ++ .probe = spihid_apple_of_probe, ++ .remove = spihid_apple_core_remove, ++ .shutdown = spihid_apple_core_shutdown, ++}; ++ ++module_spi_driver(spihid_apple_of_driver); ++ ++MODULE_DESCRIPTION("Apple SPI HID transport driver for OpenFirmware systems"); ++MODULE_AUTHOR("Janne Grunau "); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/hid/spi-hid/spi-hid-apple.h 
b/drivers/hid/spi-hid/spi-hid-apple.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/hid/spi-hid/spi-hid-apple.h +@@ -0,0 +1,35 @@ ++/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ ++ ++#ifndef SPI_HID_APPLE_H ++#define SPI_HID_APPLE_H ++ ++#include ++#include ++ ++/** ++ * struct spihid_apple_ops - Ops to control the device from the core driver. ++ * ++ * @power_on: reset and power the device on. ++ * @power_off: power the device off. ++ * @enable_irq: enable irq or ACPI gpe. ++ * @disable_irq: disable irq or ACPI gpe. ++ */ ++ ++struct spihid_apple_ops { ++ int (*power_on)(struct spihid_apple_ops *ops); ++ int (*power_off)(struct spihid_apple_ops *ops); ++ int (*enable_irq)(struct spihid_apple_ops *ops); ++ int (*disable_irq)(struct spihid_apple_ops *ops); ++ int (*enable_irq_wake)(struct spihid_apple_ops *ops); ++ int (*disable_irq_wake)(struct spihid_apple_ops *ops); ++}; ++ ++irqreturn_t spihid_apple_core_irq(int irq, void *data); ++ ++int spihid_apple_core_probe(struct spi_device *spi, struct spihid_apple_ops *ops); ++void spihid_apple_core_remove(struct spi_device *spi); ++void spihid_apple_core_shutdown(struct spi_device *spi); ++ ++extern const struct dev_pm_ops spihid_apple_core_pm; ++ ++#endif /* SPI_HID_APPLE_H */ +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Hector Martin +Date: Fri, 8 Jul 2022 02:09:24 +0900 +Subject: soc: apple: Add DockChannel driver + +DockChannel is a simple FIFO interface used to communicate between SoC +blocks. Add a driver that represents the shared interrupt controller for +the DockChannel block, and then exposes probe and data transfer +functions that child device drivers can use to instantiate individual +FIFOs. 
+ +Signed-off-by: Hector Martin +--- + drivers/soc/apple/Kconfig | 10 + + drivers/soc/apple/Makefile | 3 + + drivers/soc/apple/dockchannel.c | 406 ++++++++++ + include/linux/soc/apple/dockchannel.h | 26 + + 4 files changed, 445 insertions(+) + +diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig +index 111111111111..222222222222 100644 +--- a/drivers/soc/apple/Kconfig ++++ b/drivers/soc/apple/Kconfig +@@ -4,6 +4,16 @@ if ARCH_APPLE || COMPILE_TEST + + menu "Apple SoC drivers" + ++config APPLE_DOCKCHANNEL ++ tristate "Apple DockChannel FIFO" ++ depends on ARCH_APPLE || COMPILE_TEST ++ default ARCH_APPLE ++ help ++ DockChannel is a simple FIFO used on Apple SoCs for debug and inter-processor ++ communications. ++ ++ Say 'y' here if you have an Apple SoC. ++ + config APPLE_MAILBOX + tristate "Apple SoC mailboxes" + depends on PM +diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile +index 111111111111..222222222222 100644 +--- a/drivers/soc/apple/Makefile ++++ b/drivers/soc/apple/Makefile +@@ -1,5 +1,8 @@ + # SPDX-License-Identifier: GPL-2.0-only + ++obj-$(CONFIG_APPLE_DOCKCHANNEL) += apple-dockchannel.o ++apple-dockchannel-y = dockchannel.o ++ + obj-$(CONFIG_APPLE_MAILBOX) += apple-mailbox.o + apple-mailbox-y = mailbox.o + +diff --git a/drivers/soc/apple/dockchannel.c b/drivers/soc/apple/dockchannel.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/soc/apple/dockchannel.c +@@ -0,0 +1,406 @@ ++// SPDX-License-Identifier: GPL-2.0-only OR MIT ++/* ++ * Apple DockChannel FIFO driver ++ * Copyright The Asahi Linux Contributors ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DOCKCHANNEL_MAX_IRQ 32 ++ ++#define DOCKCHANNEL_TX_TIMEOUT_MS 1000 ++#define DOCKCHANNEL_RX_TIMEOUT_MS 1000 ++ ++#define IRQ_MASK 0x0 ++#define IRQ_FLAG 0x4 ++ ++#define IRQ_TX BIT(0) ++#define IRQ_RX BIT(1) ++ ++#define 
CONFIG_TX_THRESH 0x0 ++#define CONFIG_RX_THRESH 0x4 ++ ++#define DATA_TX8 0x4 ++#define DATA_TX16 0x8 ++#define DATA_TX24 0xc ++#define DATA_TX32 0x10 ++#define DATA_TX_FREE 0x14 ++#define DATA_RX8 0x1c ++#define DATA_RX16 0x20 ++#define DATA_RX24 0x24 ++#define DATA_RX32 0x28 ++#define DATA_RX_COUNT 0x2c ++ ++struct dockchannel { ++ struct device *dev; ++ int tx_irq; ++ int rx_irq; ++ ++ void __iomem *config_base; ++ void __iomem *data_base; ++ ++ u32 fifo_size; ++ bool awaiting; ++ struct completion tx_comp; ++ struct completion rx_comp; ++ ++ void *cookie; ++ void (*data_available)(void *cookie, size_t avail); ++}; ++ ++struct dockchannel_common { ++ struct device *dev; ++ struct irq_domain *domain; ++ int irq; ++ ++ void __iomem *irq_base; ++}; ++ ++/* Dockchannel FIFO functions */ ++ ++static irqreturn_t dockchannel_tx_irq(int irq, void *data) ++{ ++ struct dockchannel *dockchannel = data; ++ ++ disable_irq_nosync(irq); ++ complete(&dockchannel->tx_comp); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t dockchannel_rx_irq(int irq, void *data) ++{ ++ struct dockchannel *dockchannel = data; ++ ++ disable_irq_nosync(irq); ++ ++ if (dockchannel->awaiting) { ++ return IRQ_WAKE_THREAD; ++ } else { ++ complete(&dockchannel->rx_comp); ++ return IRQ_HANDLED; ++ } ++} ++ ++static irqreturn_t dockchannel_rx_irq_thread(int irq, void *data) ++{ ++ struct dockchannel *dockchannel = data; ++ size_t avail = readl_relaxed(dockchannel->data_base + DATA_RX_COUNT); ++ ++ dockchannel->awaiting = false; ++ dockchannel->data_available(dockchannel->cookie, avail); ++ ++ return IRQ_HANDLED; ++} ++ ++int dockchannel_send(struct dockchannel *dockchannel, const void *buf, size_t count) ++{ ++ size_t left = count; ++ const u8 *p = buf; ++ ++ while (left > 0) { ++ size_t avail = readl_relaxed(dockchannel->data_base + DATA_TX_FREE); ++ size_t block = min(left, avail); ++ ++ if (avail == 0) { ++ size_t threshold = min((size_t)(dockchannel->fifo_size / 2), left); ++ ++ 
writel_relaxed(threshold, dockchannel->config_base + CONFIG_TX_THRESH); ++ reinit_completion(&dockchannel->tx_comp); ++ enable_irq(dockchannel->tx_irq); ++ ++ if (!wait_for_completion_timeout(&dockchannel->tx_comp, ++ msecs_to_jiffies(DOCKCHANNEL_TX_TIMEOUT_MS))) { ++ disable_irq(dockchannel->tx_irq); ++ return -ETIMEDOUT; ++ } ++ ++ continue; ++ } ++ ++ while (block >= 4) { ++ writel_relaxed(get_unaligned_le32(p), dockchannel->data_base + DATA_TX32); ++ p += 4; ++ left -= 4; ++ block -= 4; ++ } ++ while (block > 0) { ++ writeb_relaxed(*p++, dockchannel->data_base + DATA_TX8); ++ left--; ++ block--; ++ } ++ } ++ ++ return count; ++} ++EXPORT_SYMBOL(dockchannel_send); ++ ++int dockchannel_recv(struct dockchannel *dockchannel, void *buf, size_t count) ++{ ++ size_t left = count; ++ u8 *p = buf; ++ ++ while (left > 0) { ++ size_t avail = readl_relaxed(dockchannel->data_base + DATA_RX_COUNT); ++ size_t block = min(left, avail); ++ ++ if (avail == 0) { ++ size_t threshold = min((size_t)(dockchannel->fifo_size / 2), left); ++ ++ writel_relaxed(threshold, dockchannel->config_base + CONFIG_RX_THRESH); ++ reinit_completion(&dockchannel->rx_comp); ++ enable_irq(dockchannel->rx_irq); ++ ++ if (!wait_for_completion_timeout(&dockchannel->rx_comp, ++ msecs_to_jiffies(DOCKCHANNEL_RX_TIMEOUT_MS))) { ++ disable_irq(dockchannel->rx_irq); ++ return -ETIMEDOUT; ++ } ++ ++ continue; ++ } ++ ++ while (block >= 4) { ++ put_unaligned_le32(readl_relaxed(dockchannel->data_base + DATA_RX32), p); ++ p += 4; ++ left -= 4; ++ block -= 4; ++ } ++ while (block > 0) { ++ *p++ = readl_relaxed(dockchannel->data_base + DATA_RX8) >> 8; ++ left--; ++ block--; ++ } ++ } ++ ++ return count; ++} ++EXPORT_SYMBOL(dockchannel_recv); ++ ++int dockchannel_await(struct dockchannel *dockchannel, ++ void (*callback)(void *cookie, size_t avail), ++ void *cookie, size_t count) ++{ ++ size_t threshold = min((size_t)dockchannel->fifo_size, count); ++ ++ if (!count) { ++ dockchannel->awaiting = false; ++ 
disable_irq(dockchannel->rx_irq); ++ return 0; ++ } ++ ++ dockchannel->data_available = callback; ++ dockchannel->cookie = cookie; ++ dockchannel->awaiting = true; ++ writel_relaxed(threshold, dockchannel->config_base + CONFIG_RX_THRESH); ++ enable_irq(dockchannel->rx_irq); ++ ++ return threshold; ++} ++EXPORT_SYMBOL(dockchannel_await); ++ ++struct dockchannel *dockchannel_init(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct dockchannel *dockchannel; ++ int ret; ++ ++ dockchannel = devm_kzalloc(dev, sizeof(*dockchannel), GFP_KERNEL); ++ if (!dockchannel) ++ return ERR_PTR(-ENOMEM); ++ ++ dockchannel->dev = dev; ++ dockchannel->config_base = devm_platform_ioremap_resource_byname(pdev, "config"); ++ if (IS_ERR(dockchannel->config_base)) ++ return (__force void *)dockchannel->config_base; ++ ++ dockchannel->data_base = devm_platform_ioremap_resource_byname(pdev, "data"); ++ if (IS_ERR(dockchannel->data_base)) ++ return (__force void *)dockchannel->data_base; ++ ++ ret = of_property_read_u32(dev->of_node, "apple,fifo-size", &dockchannel->fifo_size); ++ if (ret) ++ return ERR_PTR(dev_err_probe(dev, ret, "Missing apple,fifo-size property")); ++ ++ init_completion(&dockchannel->tx_comp); ++ init_completion(&dockchannel->rx_comp); ++ ++ dockchannel->tx_irq = platform_get_irq_byname(pdev, "tx"); ++ if (dockchannel->tx_irq <= 0) { ++ return ERR_PTR(dev_err_probe(dev, dockchannel->tx_irq, ++ "Failed to get TX IRQ")); ++ } ++ ++ dockchannel->rx_irq = platform_get_irq_byname(pdev, "rx"); ++ if (dockchannel->rx_irq <= 0) { ++ return ERR_PTR(dev_err_probe(dev, dockchannel->rx_irq, ++ "Failed to get RX IRQ")); ++ } ++ ++ ret = devm_request_irq(dev, dockchannel->tx_irq, dockchannel_tx_irq, IRQF_NO_AUTOEN, ++ "apple-dockchannel-tx", dockchannel); ++ if (ret) ++ return ERR_PTR(dev_err_probe(dev, ret, "Failed to request TX IRQ")); ++ ++ ret = devm_request_threaded_irq(dev, dockchannel->rx_irq, dockchannel_rx_irq, ++ dockchannel_rx_irq_thread, 
IRQF_NO_AUTOEN, ++ "apple-dockchannel-rx", dockchannel); ++ if (ret) ++ return ERR_PTR(dev_err_probe(dev, ret, "Failed to request RX IRQ")); ++ ++ return dockchannel; ++} ++EXPORT_SYMBOL(dockchannel_init); ++ ++ ++/* Dockchannel IRQchip */ ++ ++static void dockchannel_irq(struct irq_desc *desc) ++{ ++ unsigned int irq = irq_desc_get_irq(desc); ++ struct irq_chip *chip = irq_desc_get_chip(desc); ++ struct dockchannel_common *dcc = irq_get_handler_data(irq); ++ unsigned long flags = readl_relaxed(dcc->irq_base + IRQ_FLAG); ++ int bit; ++ ++ chained_irq_enter(chip, desc); ++ ++ for_each_set_bit(bit, &flags, DOCKCHANNEL_MAX_IRQ) ++ generic_handle_domain_irq(dcc->domain, bit); ++ ++ chained_irq_exit(chip, desc); ++} ++ ++static void dockchannel_irq_ack(struct irq_data *data) ++{ ++ struct dockchannel_common *dcc = irq_data_get_irq_chip_data(data); ++ unsigned int hwirq = data->hwirq; ++ ++ writel_relaxed(BIT(hwirq), dcc->irq_base + IRQ_FLAG); ++} ++ ++static void dockchannel_irq_mask(struct irq_data *data) ++{ ++ struct dockchannel_common *dcc = irq_data_get_irq_chip_data(data); ++ unsigned int hwirq = data->hwirq; ++ u32 val = readl_relaxed(dcc->irq_base + IRQ_MASK); ++ ++ writel_relaxed(val & ~BIT(hwirq), dcc->irq_base + IRQ_MASK); ++} ++ ++static void dockchannel_irq_unmask(struct irq_data *data) ++{ ++ struct dockchannel_common *dcc = irq_data_get_irq_chip_data(data); ++ unsigned int hwirq = data->hwirq; ++ u32 val = readl_relaxed(dcc->irq_base + IRQ_MASK); ++ ++ writel_relaxed(val | BIT(hwirq), dcc->irq_base + IRQ_MASK); ++} ++ ++static const struct irq_chip dockchannel_irqchip = { ++ .name = "dockchannel-irqc", ++ .irq_ack = dockchannel_irq_ack, ++ .irq_mask = dockchannel_irq_mask, ++ .irq_unmask = dockchannel_irq_unmask, ++}; ++ ++static int dockchannel_irq_domain_map(struct irq_domain *d, unsigned int virq, ++ irq_hw_number_t hw) ++{ ++ irq_set_chip_data(virq, d->host_data); ++ irq_set_chip_and_handler(virq, &dockchannel_irqchip, handle_level_irq); ++ ++ return 
0; ++} ++ ++static const struct irq_domain_ops dockchannel_irq_domain_ops = { ++ .xlate = irq_domain_xlate_twocell, ++ .map = dockchannel_irq_domain_map, ++}; ++ ++static int dockchannel_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct dockchannel_common *dcc; ++ struct device_node *child; ++ ++ dcc = devm_kzalloc(dev, sizeof(*dcc), GFP_KERNEL); ++ if (!dcc) ++ return -ENOMEM; ++ ++ dcc->dev = dev; ++ platform_set_drvdata(pdev, dcc); ++ ++ dcc->irq_base = devm_platform_ioremap_resource_byname(pdev, "irq"); ++ if (IS_ERR(dcc->irq_base)) ++ return PTR_ERR(dcc->irq_base); ++ ++ writel_relaxed(0, dcc->irq_base + IRQ_MASK); ++ writel_relaxed(~0, dcc->irq_base + IRQ_FLAG); ++ ++ dcc->domain = irq_domain_add_linear(dev->of_node, DOCKCHANNEL_MAX_IRQ, ++ &dockchannel_irq_domain_ops, dcc); ++ if (!dcc->domain) ++ return -ENOMEM; ++ ++ dcc->irq = platform_get_irq(pdev, 0); ++ if (dcc->irq <= 0) ++ return dev_err_probe(dev, dcc->irq, "Failed to get IRQ"); ++ ++ irq_set_handler_data(dcc->irq, dcc); ++ irq_set_chained_handler(dcc->irq, dockchannel_irq); ++ ++ for_each_child_of_node(dev->of_node, child) ++ of_platform_device_create(child, NULL, dev); ++ ++ return 0; ++} ++ ++static void dockchannel_remove(struct platform_device *pdev) ++{ ++ struct dockchannel_common *dcc = platform_get_drvdata(pdev); ++ int hwirq; ++ ++ device_for_each_child(&pdev->dev, NULL, of_platform_device_destroy); ++ ++ irq_set_chained_handler_and_data(dcc->irq, NULL, NULL); ++ ++ for (hwirq = 0; hwirq < DOCKCHANNEL_MAX_IRQ; hwirq++) ++ irq_dispose_mapping(irq_find_mapping(dcc->domain, hwirq)); ++ ++ irq_domain_remove(dcc->domain); ++ ++ writel_relaxed(0, dcc->irq_base + IRQ_MASK); ++ writel_relaxed(~0, dcc->irq_base + IRQ_FLAG); ++} ++ ++static const struct of_device_id dockchannel_of_match[] = { ++ { .compatible = "apple,dockchannel" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, dockchannel_of_match); ++ ++static struct platform_driver dockchannel_driver = { ++ .driver = 
{ ++ .name = "dockchannel", ++ .of_match_table = dockchannel_of_match, ++ }, ++ .probe = dockchannel_probe, ++ .remove = dockchannel_remove, ++}; ++module_platform_driver(dockchannel_driver); ++ ++MODULE_AUTHOR("Hector Martin "); ++MODULE_LICENSE("Dual MIT/GPL"); ++MODULE_DESCRIPTION("Apple DockChannel driver"); +diff --git a/include/linux/soc/apple/dockchannel.h b/include/linux/soc/apple/dockchannel.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/include/linux/soc/apple/dockchannel.h +@@ -0,0 +1,26 @@ ++/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ ++/* ++ * Apple Dockchannel devices ++ * Copyright (C) The Asahi Linux Contributors ++ */ ++#ifndef _LINUX_APPLE_DOCKCHANNEL_H_ ++#define _LINUX_APPLE_DOCKCHANNEL_H_ ++ ++#include ++#include ++#include ++ ++#if IS_ENABLED(CONFIG_APPLE_DOCKCHANNEL) ++ ++struct dockchannel; ++ ++struct dockchannel *dockchannel_init(struct platform_device *pdev); ++ ++int dockchannel_send(struct dockchannel *dockchannel, const void *buf, size_t count); ++int dockchannel_recv(struct dockchannel *dockchannel, void *buf, size_t count); ++int dockchannel_await(struct dockchannel *dockchannel, ++ void (*callback)(void *cookie, size_t avail), ++ void *cookie, size_t count); ++ ++#endif ++#endif +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Hector Martin +Date: Fri, 8 Jul 2022 02:11:21 +0900 +Subject: HID: Add Apple DockChannel HID transport driver + +Apple M2 devices have an MTP coprocessor embedded in the SoC that +handles HID for the integrated touchpad/keyboard, and communicates +over the DockChannel interface. This driver implements this new +interface. 
+ +Signed-off-by: Hector Martin +--- + drivers/hid/Kconfig | 2 + + drivers/hid/Makefile | 2 + + drivers/hid/dockchannel-hid/Kconfig | 14 + + drivers/hid/dockchannel-hid/Makefile | 6 + + drivers/hid/dockchannel-hid/dockchannel-hid.c | 1213 ++++++++ + 5 files changed, 1237 insertions(+) + +diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig +index 111111111111..222222222222 100644 +--- a/drivers/hid/Kconfig ++++ b/drivers/hid/Kconfig +@@ -1452,4 +1452,6 @@ source "drivers/hid/usbhid/Kconfig" + + source "drivers/hid/spi-hid/Kconfig" + ++source "drivers/hid/dockchannel-hid/Kconfig" ++ + endif # HID_SUPPORT +diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile +index 111111111111..222222222222 100644 +--- a/drivers/hid/Makefile ++++ b/drivers/hid/Makefile +@@ -173,8 +173,10 @@ obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/ + + obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/ + ++obj-$(CONFIG_HID_DOCKCHANNEL) += dockchannel-hid/ ++ + obj-$(CONFIG_SPI_HID_APPLE_CORE) += spi-hid/ + + obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/ + + obj-$(CONFIG_INTEL_THC_HID) += intel-thc-hid/ +diff --git a/drivers/hid/dockchannel-hid/Kconfig b/drivers/hid/dockchannel-hid/Kconfig +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/hid/dockchannel-hid/Kconfig +@@ -0,0 +1,14 @@ ++# SPDX-License-Identifier: GPL-2.0-only OR MIT ++menu "DockChannel HID support" ++ depends on APPLE_DOCKCHANNEL ++ ++config HID_DOCKCHANNEL ++ tristate "HID over DockChannel transport layer for Apple Silicon SoCs" ++ default ARCH_APPLE ++ depends on APPLE_DOCKCHANNEL && INPUT && OF && HID ++ help ++ Say Y here if you use an M2 or later Apple Silicon based laptop. ++ The keyboard and touchpad are HID based devices connected via the ++ proprietary DockChannel interface.
++ ++endmenu +diff --git a/drivers/hid/dockchannel-hid/Makefile b/drivers/hid/dockchannel-hid/Makefile +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/hid/dockchannel-hid/Makefile +@@ -0,0 +1,6 @@ ++# SPDX-License-Identifier: GPL-2.0-only OR MIT ++# ++# Makefile for DockChannel HID transport drivers ++# ++ ++obj-$(CONFIG_HID_DOCKCHANNEL) += dockchannel-hid.o +diff --git a/drivers/hid/dockchannel-hid/dockchannel-hid.c b/drivers/hid/dockchannel-hid/dockchannel-hid.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/hid/dockchannel-hid/dockchannel-hid.c +@@ -0,0 +1,1213 @@ ++/* ++ * SPDX-License-Identifier: GPL-2.0 OR MIT ++ * ++ * Apple DockChannel HID transport driver ++ * ++ * Copyright The Asahi Linux Contributors ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "../hid-ids.h" ++ ++#define COMMAND_TIMEOUT_MS 1000 ++#define START_TIMEOUT_MS 2000 ++ ++#define MAX_INTERFACES 16 ++ ++/* Data + checksum */ ++#define MAX_PKT_SIZE (0xffff + 4) ++ ++#define DCHID_CHANNEL_CMD 0x11 ++#define DCHID_CHANNEL_REPORT 0x12 ++ ++struct dchid_hdr { ++ u8 hdr_len; ++ u8 channel; ++ u16 length; ++ u8 seq; ++ u8 iface; ++ u16 pad; ++} __packed; ++ ++#define IFACE_COMM 0 ++ ++#define FLAGS_GROUP GENMASK(7, 6) ++#define FLAGS_REQ GENMASK(5, 0) ++ ++#define REQ_SET_REPORT 0 ++#define REQ_GET_REPORT 1 ++ ++struct dchid_subhdr { ++ u8 flags; ++ u8 unk; ++ u16 length; ++ u32 retcode; ++} __packed; ++ ++#define EVENT_GPIO_CMD 0xa0 ++#define EVENT_INIT 0xf0 ++#define EVENT_READY 0xf1 ++ ++struct dchid_init_hdr { ++ u8 type; ++ u8 unk1; ++ u8 unk2; ++ u8 iface; ++ char name[16]; ++ u8 more_packets; ++ u8 unkpad; ++} __packed; ++ ++#define INIT_HID_DESCRIPTOR 0 ++#define INIT_GPIO_REQUEST 1 ++#define INIT_TERMINATOR 2 ++#define INIT_PRODUCT_NAME 7 ++ ++#define CMD_RESET_INTERFACE 0x40 ++#define CMD_SEND_FIRMWARE 
0x95 ++#define CMD_ENABLE_INTERFACE 0xb4 ++#define CMD_ACK_GPIO_CMD 0xa1 ++ ++struct dchid_init_block_hdr { ++ u16 type; ++ u16 length; ++} __packed; ++ ++#define MAX_GPIO_NAME 32 ++ ++struct dchid_gpio_request { ++ u16 unk; ++ u16 id; ++ char name[MAX_GPIO_NAME]; ++} __packed; ++ ++struct dchid_gpio_cmd { ++ u8 type; ++ u8 iface; ++ u8 gpio; ++ u8 unk; ++ u8 cmd; ++} __packed; ++ ++struct dchid_gpio_ack { ++ u8 type; ++ u32 retcode; ++ u8 cmd[]; ++} __packed; ++ ++#define STM_REPORT_ID 0x10 ++#define STM_REPORT_SERIAL 0x11 ++#define STM_REPORT_KEYBTYPE 0x14 ++ ++struct dchid_stm_id { ++ u8 unk; ++ u16 vendor_id; ++ u16 product_id; ++ u16 version_number; ++ u8 unk2; ++ u8 unk3; ++ u8 keyboard_type; ++ u8 serial_length; ++ /* Serial follows, but we grab it with a different report. */ ++} __packed; ++ ++#define FW_MAGIC 0x46444948 ++#define FW_VER 1 ++ ++struct fw_header { ++ u32 magic; ++ u32 version; ++ u32 hdr_length; ++ u32 data_length; ++ u32 iface_offset; ++} __packed; ++ ++struct dchid_work { ++ struct work_struct work; ++ struct dchid_iface *iface; ++ ++ struct dchid_hdr hdr; ++ u8 data[]; ++}; ++ ++struct dchid_iface { ++ struct dockchannel_hid *dchid; ++ struct hid_device *hid; ++ struct workqueue_struct *wq; ++ ++ bool creating; ++ struct work_struct create_work; ++ ++ int index; ++ const char *name; ++ const struct device_node *of_node; ++ ++ uint8_t tx_seq; ++ bool deferred; ++ bool starting; ++ bool open; ++ struct completion ready; ++ ++ void *hid_desc; ++ size_t hid_desc_len; ++ ++ struct gpio_desc *gpio; ++ char gpio_name[MAX_GPIO_NAME]; ++ int gpio_id; ++ ++ struct mutex out_mutex; ++ u32 out_flags; ++ int out_report; ++ u32 retcode; ++ void *resp_buf; ++ size_t resp_size; ++ struct completion out_complete; ++ ++ u32 keyboard_layout_id; ++}; ++ ++struct dockchannel_hid { ++ struct device *dev; ++ struct dockchannel *dc; ++ struct device_link *helper_link; ++ ++ bool id_ready; ++ struct dchid_stm_id device_id; ++ char serial[64]; ++ ++ struct 
dchid_iface *comm; ++ struct dchid_iface *ifaces[MAX_INTERFACES]; ++ ++ u8 pkt_buf[MAX_PKT_SIZE]; ++ ++ /* Workqueue to asynchronously create HID devices */ ++ struct workqueue_struct *new_iface_wq; ++}; ++ ++static ssize_t apple_layout_id_show(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct hid_device *hdev = to_hid_device(dev); ++ struct dchid_iface *iface = hdev->driver_data; ++ ++ return scnprintf(buf, PAGE_SIZE, "%d\n", iface->keyboard_layout_id); ++} ++ ++static DEVICE_ATTR_RO(apple_layout_id); ++ ++static struct dchid_iface * ++dchid_get_interface(struct dockchannel_hid *dchid, int index, const char *name) ++{ ++ struct dchid_iface *iface; ++ ++ if (index >= MAX_INTERFACES) { ++ dev_err(dchid->dev, "Interface index %d out of range\n", index); ++ return NULL; ++ } ++ ++ if (dchid->ifaces[index]) ++ return dchid->ifaces[index]; ++ ++ iface = devm_kzalloc(dchid->dev, sizeof(struct dchid_iface), GFP_KERNEL); ++ if (!iface) ++ return NULL; ++ ++ iface->index = index; ++ iface->name = devm_kstrdup(dchid->dev, name, GFP_KERNEL); ++ iface->dchid = dchid; ++ iface->out_report= -1; ++ init_completion(&iface->out_complete); ++ init_completion(&iface->ready); ++ mutex_init(&iface->out_mutex); ++ iface->wq = alloc_ordered_workqueue("dchid-%s", WQ_MEM_RECLAIM, iface->name); ++ if (!iface->wq) ++ return NULL; ++ ++ /* Comm is not a HID subdevice */ ++ if (!strcmp(name, "comm")) { ++ dchid->ifaces[index] = iface; ++ return iface; ++ } ++ ++ iface->of_node = of_get_child_by_name(dchid->dev->of_node, name); ++ if (!iface->of_node) { ++ dev_warn(dchid->dev, "No OF node for subdevice %s, ignoring.", name); ++ return NULL; ++ } ++ ++ dchid->ifaces[index] = iface; ++ return iface; ++} ++ ++static u32 dchid_checksum(void *p, size_t length) ++{ ++ u32 sum = 0; ++ ++ while (length >= 4) { ++ sum += get_unaligned_le32(p); ++ p += 4; ++ length -= 4; ++ } ++ ++ WARN_ON_ONCE(length); ++ return sum; ++} ++ ++static int dchid_send(struct dchid_iface 
*iface, u32 flags, void *msg, size_t size) ++{ ++ u32 checksum = 0xffffffff; ++ size_t wsize = round_down(size, 4); ++ size_t tsize = size - wsize; ++ int ret; ++ struct { ++ struct dchid_hdr hdr; ++ struct dchid_subhdr sub; ++ } __packed h; ++ ++ memset(&h, 0, sizeof(h)); ++ h.hdr.hdr_len = sizeof(h.hdr); ++ h.hdr.channel = DCHID_CHANNEL_CMD; ++ h.hdr.length = round_up(size, 4) + sizeof(h.sub); ++ h.hdr.seq = iface->tx_seq; ++ h.hdr.iface = iface->index; ++ h.sub.flags = flags; ++ h.sub.length = size; ++ ++ ret = dockchannel_send(iface->dchid->dc, &h, sizeof(h)); ++ if (ret < 0) ++ return ret; ++ checksum -= dchid_checksum(&h, sizeof(h)); ++ ++ ret = dockchannel_send(iface->dchid->dc, msg, wsize); ++ if (ret < 0) ++ return ret; ++ checksum -= dchid_checksum(msg, wsize); ++ ++ if (tsize) { ++ u8 tail[4] = {0, 0, 0, 0}; ++ ++ memcpy(tail, msg + wsize, tsize); ++ ret = dockchannel_send(iface->dchid->dc, tail, sizeof(tail)); ++ if (ret < 0) ++ return ret; ++ checksum -= dchid_checksum(tail, sizeof(tail)); ++ } ++ ++ ret = dockchannel_send(iface->dchid->dc, &checksum, sizeof(checksum)); ++ if (ret < 0) ++ return ret; ++ ++ return 0; ++} ++ ++static int dchid_cmd(struct dchid_iface *iface, u32 type, u32 req, ++ void *data, size_t size, void *resp_buf, size_t resp_size) ++{ ++ int ret; ++ int report_id = *(u8*)data; ++ ++ mutex_lock(&iface->out_mutex); ++ ++ WARN_ON(iface->out_report != -1); ++ iface->out_report = report_id; ++ iface->out_flags = FIELD_PREP(FLAGS_GROUP, type) | FIELD_PREP(FLAGS_REQ, req); ++ iface->resp_buf = resp_buf; ++ iface->resp_size = resp_size; ++ reinit_completion(&iface->out_complete); ++ ++ ret = dchid_send(iface, iface->out_flags, data, size); ++ if (ret < 0) ++ goto done; ++ ++ if (!wait_for_completion_timeout(&iface->out_complete, msecs_to_jiffies(COMMAND_TIMEOUT_MS))) { ++ dev_err(iface->dchid->dev, "output report 0x%x to iface %d (%s) timed out\n", ++ report_id, iface->index, iface->name); ++ ret = -ETIMEDOUT; ++ goto done; ++ } ++ ++ ret 
= iface->resp_size; ++ if (iface->retcode) { ++ dev_err(iface->dchid->dev, ++ "output report 0x%x to iface %d (%s) failed with err 0x%x\n", ++ report_id, iface->index, iface->name, iface->retcode); ++ ret = -EIO; ++ } ++ ++done: ++ iface->tx_seq++; ++ iface->out_report = -1; ++ iface->out_flags = 0; ++ iface->resp_buf = NULL; ++ iface->resp_size = 0; ++ mutex_unlock(&iface->out_mutex); ++ return ret; ++} ++ ++static int dchid_comm_cmd(struct dockchannel_hid *dchid, void *cmd, size_t size) ++{ ++ return dchid_cmd(dchid->comm, HID_FEATURE_REPORT, REQ_SET_REPORT, cmd, size, NULL, 0); ++} ++ ++static int dchid_enable_interface(struct dchid_iface *iface) ++{ ++ u8 msg[] = { CMD_ENABLE_INTERFACE, iface->index }; ++ ++ return dchid_comm_cmd(iface->dchid, msg, sizeof(msg)); ++} ++ ++static int dchid_reset_interface(struct dchid_iface *iface, int state) ++{ ++ u8 msg[] = { CMD_RESET_INTERFACE, 1, iface->index, state }; ++ ++ return dchid_comm_cmd(iface->dchid, msg, sizeof(msg)); ++} ++ ++static int dchid_send_firmware(struct dchid_iface *iface, void *firmware, size_t size) ++{ ++ struct { ++ u8 cmd; ++ u8 unk1; ++ u8 unk2; ++ u8 iface; ++ u64 addr; ++ u32 size; ++ } __packed msg = { ++ .cmd = CMD_SEND_FIRMWARE, ++ .unk1 = 2, ++ .unk2 = 0, ++ .iface = iface->index, ++ .size = size, ++ }; ++ dma_addr_t addr; ++ void *buf = dmam_alloc_coherent(iface->dchid->dev, size, &addr, GFP_KERNEL); ++ ++ if (IS_ERR_OR_NULL(buf)) ++ return buf ? 
PTR_ERR(buf) : -ENOMEM; ++ ++ msg.addr = addr; ++ memcpy(buf, firmware, size); ++ wmb(); ++ ++ return dchid_comm_cmd(iface->dchid, &msg, sizeof(msg)); ++} ++ ++static int dchid_get_firmware(struct dchid_iface *iface, void **firmware, size_t *size) ++{ ++ int ret; ++ const char *fw_name; ++ const struct firmware *fw; ++ struct fw_header *hdr; ++ u8 *fw_data; ++ ++ ret = of_property_read_string(iface->of_node, "firmware-name", &fw_name); ++ if (ret) { ++ /* Firmware is only for some devices */ ++ *firmware = NULL; ++ *size = 0; ++ return 0; ++ } ++ ++ ret = request_firmware(&fw, fw_name, iface->dchid->dev); ++ if (ret) ++ return ret; ++ ++ hdr = (struct fw_header *)fw->data; ++ ++ if (hdr->magic != FW_MAGIC || hdr->version != FW_VER || ++ hdr->hdr_length < sizeof(*hdr) || hdr->hdr_length > fw->size || ++ (hdr->hdr_length + (size_t)hdr->data_length) > fw->size || ++ hdr->iface_offset >= hdr->data_length) { ++ dev_warn(iface->dchid->dev, "%s: invalid firmware header\n", ++ fw_name); ++ ret = -EINVAL; ++ goto done; ++ } ++ ++ fw_data = devm_kmemdup(iface->dchid->dev, fw->data + hdr->hdr_length, ++ hdr->data_length, GFP_KERNEL); ++ if (!fw_data) { ++ ret = -ENOMEM; ++ goto done; ++ } ++ ++ if (hdr->iface_offset) ++ fw_data[hdr->iface_offset] = iface->index; ++ ++ *firmware = fw_data; ++ *size = hdr->data_length; ++ ++done: ++ release_firmware(fw); ++ return ret; ++} ++ ++static int dchid_request_gpio(struct dchid_iface *iface) ++{ ++ char prop_name[MAX_GPIO_NAME + 16]; ++ ++ if (iface->gpio) ++ return 0; ++ ++ dev_info(iface->dchid->dev, "Requesting GPIO %s#%d: %s\n", ++ iface->name, iface->gpio_id, iface->gpio_name); ++ ++ snprintf(prop_name, sizeof(prop_name), "apple,%s", iface->gpio_name); ++ ++ iface->gpio = devm_gpiod_get_index(iface->dchid->dev, prop_name, 0, GPIOD_OUT_LOW); ++ ++ if (IS_ERR_OR_NULL(iface->gpio)) { ++ dev_err(iface->dchid->dev, "Failed to request GPIO %s-gpios\n", prop_name); ++ iface->gpio = NULL; ++ return -1; ++ } ++ ++ return 0; ++} ++ ++static 
int dchid_start_interface(struct dchid_iface *iface) ++{ ++ void *fw; ++ size_t size; ++ int ret; ++ ++ if (iface->starting) { ++ dev_warn(iface->dchid->dev, "Interface %s is already starting", iface->name); ++ return -EINPROGRESS; ++ } ++ ++ dev_info(iface->dchid->dev, "Starting interface %s\n", iface->name); ++ ++ iface->starting = true; ++ ++ /* Look to see if we need firmware */ ++ ret = dchid_get_firmware(iface, &fw, &size); ++ if (ret < 0) ++ goto err; ++ ++ /* If we need a GPIO, make sure we have it. */ ++ if (iface->gpio_id) { ++ ret = dchid_request_gpio(iface); ++ if (ret < 0) ++ goto err; ++ } ++ ++ /* Only multi-touch has firmware */ ++ if (fw && size) { ++ ++ /* Send firmware to the device */ ++ dev_info(iface->dchid->dev, "Sending firmware for %s\n", iface->name); ++ ret = dchid_send_firmware(iface, fw, size); ++ if (ret < 0) { ++ dev_err(iface->dchid->dev, "Failed to send %s firmwareS", iface->name); ++ goto err; ++ } ++ ++ /* After loading firmware, multi-touch needs a reset */ ++ dev_info(iface->dchid->dev, "Resetting %s\n", iface->name); ++ dchid_reset_interface(iface, 0); ++ dchid_reset_interface(iface, 2); ++ } ++ ++ return 0; ++ ++err: ++ iface->starting = false; ++ return ret; ++} ++ ++static int dchid_start(struct hid_device *hdev) ++{ ++ struct dchid_iface *iface = hdev->driver_data; ++ ++ if (iface->keyboard_layout_id) { ++ int ret = device_create_file(&hdev->dev, &dev_attr_apple_layout_id); ++ if (ret) { ++ dev_warn(iface->dchid->dev, "Failed to create apple_layout_id: %d", ret); ++ iface->keyboard_layout_id = 0; ++ } ++ } ++ ++ return 0; ++}; ++ ++static void dchid_stop(struct hid_device *hdev) ++{ ++ struct dchid_iface *iface = hdev->driver_data; ++ ++ if (iface->keyboard_layout_id) ++ device_remove_file(&hdev->dev, &dev_attr_apple_layout_id); ++} ++ ++static int dchid_open(struct hid_device *hdev) ++{ ++ struct dchid_iface *iface = hdev->driver_data; ++ int ret; ++ ++ if (!completion_done(&iface->ready)) { ++ ret = 
dchid_start_interface(iface); ++ if (ret < 0) ++ return ret; ++ ++ if (!wait_for_completion_timeout(&iface->ready, msecs_to_jiffies(START_TIMEOUT_MS))) { ++ dev_err(iface->dchid->dev, "iface %s start timed out\n", iface->name); ++ return -ETIMEDOUT; ++ } ++ } ++ ++ iface->open = true; ++ return 0; ++} ++ ++static void dchid_close(struct hid_device *hdev) ++{ ++ struct dchid_iface *iface = hdev->driver_data; ++ ++ iface->open = false; ++} ++ ++static int dchid_parse(struct hid_device *hdev) ++{ ++ struct dchid_iface *iface = hdev->driver_data; ++ ++ return hid_parse_report(hdev, iface->hid_desc, iface->hid_desc_len); ++} ++ ++/* Note: buf excludes report number! For ease of fetching strings/etc. */ ++static int dchid_get_report_cmd(struct dchid_iface *iface, u8 reportnum, void *buf, size_t len) ++{ ++ int ret = dchid_cmd(iface, HID_FEATURE_REPORT, REQ_GET_REPORT, &reportnum, 1, buf, len); ++ ++ return ret <= 0 ? ret : ret - 1; ++} ++ ++/* Note: buf includes report number! */ ++static int dchid_set_report(struct dchid_iface *iface, void *buf, size_t len) ++{ ++ return dchid_cmd(iface, HID_OUTPUT_REPORT, REQ_SET_REPORT, buf, len, NULL, 0); ++} ++ ++static int dchid_raw_request(struct hid_device *hdev, ++ unsigned char reportnum, __u8 *buf, size_t len, ++ unsigned char rtype, int reqtype) ++{ ++ struct dchid_iface *iface = hdev->driver_data; ++ ++ switch (reqtype) { ++ case HID_REQ_GET_REPORT: ++ buf[0] = reportnum; ++ return dchid_cmd(iface, rtype, REQ_GET_REPORT, &reportnum, 1, buf + 1, len - 1); ++ case HID_REQ_SET_REPORT: ++ return dchid_set_report(iface, buf, len); ++ default: ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++static struct hid_ll_driver dchid_ll = { ++ .start = &dchid_start, ++ .stop = &dchid_stop, ++ .open = &dchid_open, ++ .close = &dchid_close, ++ .parse = &dchid_parse, ++ .raw_request = &dchid_raw_request, ++}; ++ ++static void dchid_create_interface_work(struct work_struct *ws) ++{ ++ struct dchid_iface *iface = container_of(ws, struct 
dchid_iface, create_work); ++ struct dockchannel_hid *dchid = iface->dchid; ++ struct hid_device *hid; ++ int ret; ++ ++ if (iface->hid) { ++ dev_warn(dchid->dev, "Interface %s already created!\n", ++ iface->name); ++ return; ++ } ++ ++ dev_info(dchid->dev, "New interface %s\n", iface->name); ++ ++ /* Start the interface. This is not the entire init process, as firmware is loaded later on device open. */ ++ ret = dchid_enable_interface(iface); ++ if (ret < 0) { ++ dev_warn(dchid->dev, "Failed to enable %s: %d\n", iface->name, ret); ++ return; ++ } ++ ++ iface->deferred = false; ++ ++ hid = hid_allocate_device(); ++ if (IS_ERR(hid)) ++ return; ++ ++ snprintf(hid->name, sizeof(hid->name), "Apple MTP %s", iface->name); ++ snprintf(hid->phys, sizeof(hid->phys), "%s.%d (%s)", ++ dev_name(dchid->dev), iface->index, iface->name); ++ strscpy(hid->uniq, dchid->serial, sizeof(hid->uniq)); ++ ++ hid->ll_driver = &dchid_ll; ++ hid->bus = BUS_HOST; ++ hid->vendor = dchid->device_id.vendor_id; ++ hid->product = dchid->device_id.product_id; ++ hid->version = dchid->device_id.version_number; ++ hid->type = HID_TYPE_OTHER; ++ if (!strcmp(iface->name, "multi-touch")) { ++ hid->type = HID_TYPE_SPI_MOUSE; ++ } else if (!strcmp(iface->name, "keyboard")) { ++ u32 country_code = 0; ++ ++ hid->type = HID_TYPE_SPI_KEYBOARD; ++ ++ /* ++ * We have to get the country code from the device tree, since the ++ * device provides no reliable way to get this info. 
++ */ ++ if (!of_property_read_u32(iface->of_node, "hid-country-code", &country_code)) ++ hid->country = country_code; ++ ++ of_property_read_u32(iface->of_node, "apple,keyboard-layout-id", ++ &iface->keyboard_layout_id); ++ } ++ ++ hid->dev.parent = iface->dchid->dev; ++ hid->driver_data = iface; ++ ++ iface->hid = hid; ++ ++ ret = hid_add_device(hid); ++ if (ret < 0) { ++ iface->hid = NULL; ++ hid_destroy_device(hid); ++ dev_warn(iface->dchid->dev, "Failed to register hid device %s", iface->name); ++ } ++} ++ ++static int dchid_create_interface(struct dchid_iface *iface) ++{ ++ if (iface->creating) ++ return -EBUSY; ++ ++ iface->creating = true; ++ INIT_WORK(&iface->create_work, dchid_create_interface_work); ++ return queue_work(iface->dchid->new_iface_wq, &iface->create_work); ++} ++ ++static void dchid_handle_descriptor(struct dchid_iface *iface, void *hid_desc, size_t desc_len) ++{ ++ if (iface->hid) { ++ dev_warn(iface->dchid->dev, "Tried to initialize already started interface %s!\n", ++ iface->name); ++ return; ++ } ++ ++ iface->hid_desc = devm_kmemdup(iface->dchid->dev, hid_desc, desc_len, GFP_KERNEL); ++ if (!iface->hid_desc) ++ return; ++ ++ iface->hid_desc_len = desc_len; ++} ++ ++static void dchid_handle_ready(struct dockchannel_hid *dchid, void *data, size_t length) ++{ ++ struct dchid_iface *iface; ++ u8 *pkt = data; ++ u8 index; ++ int i, ret; ++ ++ if (length < 2) { ++ dev_err(dchid->dev, "Bad length for ready message: %zu\n", length); ++ return; ++ } ++ ++ index = pkt[1]; ++ ++ if (index >= MAX_INTERFACES) { ++ dev_err(dchid->dev, "Got ready notification for bad iface %d\n", index); ++ return; ++ } ++ ++ iface = dchid->ifaces[index]; ++ if (!iface) { ++ dev_err(dchid->dev, "Got ready notification for unknown iface %d\n", index); ++ return; ++ } ++ ++ dev_info(dchid->dev, "Interface %s is now ready\n", iface->name); ++ complete_all(&iface->ready); ++ ++ /* When STM is ready, grab global device info */ ++ if (!strcmp(iface->name, "stm")) { ++ ret = 
dchid_get_report_cmd(iface, STM_REPORT_ID, &dchid->device_id, ++ sizeof(dchid->device_id)); ++ if (ret < sizeof(dchid->device_id)) { ++ dev_warn(iface->dchid->dev, "Failed to get device ID from STM!\n"); ++ /* Fake it and keep going. Things might still work... */ ++ memset(&dchid->device_id, 0, sizeof(dchid->device_id)); ++ dchid->device_id.vendor_id = HOST_VENDOR_ID_APPLE; ++ } ++ ret = dchid_get_report_cmd(iface, STM_REPORT_SERIAL, dchid->serial, ++ sizeof(dchid->serial) - 1); ++ if (ret < 0) { ++ dev_warn(iface->dchid->dev, "Failed to get serial from STM!\n"); ++ dchid->serial[0] = 0; ++ } ++ ++ dchid->id_ready = true; ++ for (i = 0; i < MAX_INTERFACES; i++) { ++ if (!dchid->ifaces[i] || !dchid->ifaces[i]->deferred) ++ continue; ++ dchid_create_interface(dchid->ifaces[i]); ++ } ++ } ++} ++ ++static void dchid_handle_init(struct dockchannel_hid *dchid, void *data, size_t length) ++{ ++ struct dchid_init_hdr *hdr = data; ++ struct dchid_iface *iface; ++ struct dchid_init_block_hdr *blk; ++ ++ if (length < sizeof(*hdr)) ++ return; ++ ++ iface = dchid_get_interface(dchid, hdr->iface, hdr->name); ++ if (!iface) ++ return; ++ ++ data += sizeof(*hdr); ++ length -= sizeof(*hdr); ++ ++ while (length >= sizeof(*blk)) { ++ blk = data; ++ data += sizeof(*blk); ++ length -= sizeof(*blk); ++ ++ if (blk->length > length) ++ break; ++ ++ switch (blk->type) { ++ case INIT_HID_DESCRIPTOR: ++ dchid_handle_descriptor(iface, data, blk->length); ++ break; ++ ++ case INIT_GPIO_REQUEST: { ++ struct dchid_gpio_request *req = data; ++ ++ if (sizeof(*req) > length) ++ break; ++ ++ if (iface->gpio_id) { ++ dev_err(dchid->dev, ++ "Cannot request more than one GPIO per interface!\n"); ++ break; ++ } ++ ++ strscpy(iface->gpio_name, req->name, MAX_GPIO_NAME); ++ iface->gpio_id = req->id; ++ break; ++ } ++ ++ case INIT_TERMINATOR: ++ break; ++ ++ case INIT_PRODUCT_NAME: { ++ char *product = data; ++ ++ if (product[blk->length - 1] != 0) { ++ dev_warn(dchid->dev, "Unterminated product name for 
%s\n", ++ iface->name); ++ } else { ++ dev_info(dchid->dev, "Product name for %s: %s\n", ++ iface->name, product); ++ } ++ break; ++ } ++ ++ default: ++ dev_warn(dchid->dev, "Unknown init packet %d for %s\n", ++ blk->type, iface->name); ++ break; ++ } ++ ++ data += blk->length; ++ length -= blk->length; ++ ++ if (blk->type == INIT_TERMINATOR) ++ break; ++ } ++ ++ if (hdr->more_packets) ++ return; ++ ++ /* We need to enable STM first, since it'll give us the device IDs */ ++ if (iface->dchid->id_ready || !strcmp(iface->name, "stm")) { ++ dchid_create_interface(iface); ++ } else { ++ iface->deferred = true; ++ } ++} ++ ++static void dchid_handle_gpio(struct dockchannel_hid *dchid, void *data, size_t length) ++{ ++ struct dchid_gpio_cmd *cmd = data; ++ struct dchid_iface *iface; ++ u32 retcode = 0xe000f00d; /* Give it a random Apple-style error code */ ++ struct dchid_gpio_ack *ack; ++ ++ if (length < sizeof(*cmd)) ++ return; ++ ++ if (cmd->iface >= MAX_INTERFACES || !(iface = dchid->ifaces[cmd->iface])) { ++ dev_err(dchid->dev, "Got GPIO command for bad inteface %d\n", cmd->iface); ++ goto err; ++ } ++ ++ if (dchid_request_gpio(iface) < 0) ++ goto err; ++ ++ if (!iface->gpio || cmd->gpio != iface->gpio_id) { ++ dev_err(dchid->dev, "Got GPIO command for bad GPIO %s#%d\n", ++ iface->name, cmd->gpio); ++ goto err; ++ } ++ ++ dev_info(dchid->dev, "GPIO command: %s#%d: %d\n", iface->name, cmd->gpio, cmd->cmd); ++ ++ switch (cmd->cmd) { ++ case 3: ++ /* Pulse. */ ++ gpiod_set_value_cansleep(iface->gpio, 1); ++ msleep(10); /* Random guess... 
*/ ++ gpiod_set_value_cansleep(iface->gpio, 0); ++ retcode = 0; ++ break; ++ default: ++ dev_err(dchid->dev, "Unknown GPIO command %d\n", cmd->cmd ); ++ break; ++ } ++ ++err: ++ /* Ack it */ ++ ack = kzalloc(sizeof(*ack) + length, GFP_KERNEL); ++ if (!ack) ++ return; ++ ++ ack->type = CMD_ACK_GPIO_CMD; ++ ack->retcode = retcode; ++ memcpy(ack->cmd, data, length); ++ ++ if (dchid_comm_cmd(dchid, ack, sizeof(*ack) + length) < 0) ++ dev_err(dchid->dev, "Failed to ACK GPIO command\n"); ++ ++ kfree(ack); ++} ++ ++static void dchid_handle_event(struct dockchannel_hid *dchid, void *data, size_t length) ++{ ++ u8 *p = data; ++ switch (*p) { ++ case EVENT_INIT: ++ dchid_handle_init(dchid, data, length); ++ break; ++ case EVENT_READY: ++ dchid_handle_ready(dchid, data, length); ++ break; ++ case EVENT_GPIO_CMD: ++ dchid_handle_gpio(dchid, data, length); ++ break; ++ } ++} ++ ++static void dchid_handle_report(struct dchid_iface *iface, void *data, size_t length) ++{ ++ struct dockchannel_hid *dchid = iface->dchid; ++ ++ if (!iface->hid) { ++ dev_warn(dchid->dev, "Report received but %s is not initialized!\n", iface->name); ++ return; ++ } ++ ++ if (!iface->open) ++ return; ++ ++ hid_input_report(iface->hid, HID_INPUT_REPORT, data, length, 1); ++} ++ ++static void dchid_packet_work(struct work_struct *ws) ++{ ++ struct dchid_work *work = container_of(ws, struct dchid_work, work); ++ struct dchid_subhdr *shdr = (void *)work->data; ++ struct dockchannel_hid *dchid = work->iface->dchid; ++ int type = FIELD_GET(FLAGS_GROUP, shdr->flags); ++ u8 *payload = work->data + sizeof(*shdr); ++ ++ if (shdr->length + sizeof(*shdr) > work->hdr.length) { ++ dev_err(dchid->dev, "Bad sub header length (%d > %zu)\n", ++ shdr->length, work->hdr.length - sizeof(*shdr)); ++ return; ++ } ++ ++ switch (type) { ++ case HID_INPUT_REPORT: ++ if (work->hdr.iface == IFACE_COMM) ++ dchid_handle_event(dchid, payload, shdr->length); ++ else ++ dchid_handle_report(work->iface, payload, shdr->length); ++ break; 
++ default: ++ dev_err(dchid->dev, "Received unknown packet type %d\n", type); ++ break; ++ } ++ ++ kfree(work); ++} ++ ++static void dchid_handle_ack(struct dchid_iface *iface, struct dchid_hdr *hdr, void *data) ++{ ++ struct dchid_subhdr *shdr = (void *)data; ++ u8 *payload = data + sizeof(*shdr); ++ ++ if (shdr->length + sizeof(*shdr) > hdr->length) { ++ dev_err(iface->dchid->dev, "Bad sub header length (%d > %zu)\n", ++ shdr->length, hdr->length - sizeof(*shdr)); ++ return; ++ } ++ if (shdr->flags != iface->out_flags) { ++ dev_err(iface->dchid->dev, ++ "Received unexpected flags 0x%x on ACK channel (expected 0x%x)\n", ++ shdr->flags, iface->out_flags); ++ return; ++ } ++ ++ if (shdr->length < 1) { ++ dev_err(iface->dchid->dev, "Received length 0 output report ack\n"); ++ return; ++ } ++ if (iface->tx_seq != hdr->seq) { ++ dev_err(iface->dchid->dev, "Received ACK with bad seq (expected %d, got %d)\n", ++ iface->tx_seq, hdr->seq); ++ return; ++ } ++ if (iface->out_report != payload[0]) { ++ dev_err(iface->dchid->dev, "Received ACK with bad report (expected %d, got %d)\n", ++ iface->out_report, payload[0]); ++ return; ++ } ++ ++ if (iface->resp_buf && iface->resp_size) ++ memcpy(iface->resp_buf, payload + 1, min((size_t)shdr->length - 1, iface->resp_size)); ++ ++ iface->resp_size = shdr->length; ++ iface->out_report = -1; ++ iface->retcode = shdr->retcode; ++ complete(&iface->out_complete); ++} ++ ++static void dchid_handle_packet(void *cookie, size_t avail) ++{ ++ struct dockchannel_hid *dchid = cookie; ++ struct dchid_hdr hdr; ++ struct dchid_work *work; ++ struct dchid_iface *iface; ++ u32 checksum; ++ ++ if (dockchannel_recv(dchid->dc, &hdr, sizeof(hdr)) != sizeof(hdr)) { ++ dev_err(dchid->dev, "Read failed (header)\n"); ++ return; ++ } ++ ++ if (hdr.hdr_len != sizeof(hdr)) { ++ dev_err(dchid->dev, "Bad header length %d\n", hdr.hdr_len); ++ goto done; ++ } ++ ++ if (dockchannel_recv(dchid->dc, dchid->pkt_buf, hdr.length + 4) != (hdr.length + 4)) { ++ 
dev_err(dchid->dev, "Read failed (body)\n"); ++ goto done; ++ } ++ ++ checksum = dchid_checksum(&hdr, sizeof(hdr)); ++ checksum += dchid_checksum(dchid->pkt_buf, hdr.length + 4); ++ ++ if (checksum != 0xffffffff) { ++ dev_err(dchid->dev, "Checksum mismatch (iface %d): 0x%08x != 0xffffffff\n", ++ hdr.iface, checksum); ++ goto done; ++ } ++ ++ ++ if (hdr.iface >= MAX_INTERFACES) { ++ dev_err(dchid->dev, "Bad iface %d\n", hdr.iface); goto done; ++ } ++ ++ iface = dchid->ifaces[hdr.iface]; ++ ++ if (!iface) { ++ dev_err(dchid->dev, "Received packet for uninitialized iface %d\n", hdr.iface); ++ goto done; ++ } ++ ++ switch (hdr.channel) { ++ case DCHID_CHANNEL_CMD: ++ dchid_handle_ack(iface, &hdr, dchid->pkt_buf); ++ goto done; ++ case DCHID_CHANNEL_REPORT: ++ break; ++ default: ++ dev_warn(dchid->dev, "Unknown channel 0x%x, treating as report...\n", ++ hdr.channel); ++ break; ++ } ++ ++ work = kzalloc(sizeof(*work) + hdr.length, GFP_KERNEL); ++ if (!work) ++ return; ++ ++ work->hdr = hdr; ++ work->iface = iface; ++ memcpy(work->data, dchid->pkt_buf, hdr.length); ++ INIT_WORK(&work->work, dchid_packet_work); ++ ++ queue_work(iface->wq, &work->work); ++ ++done: ++ dockchannel_await(dchid->dc, dchid_handle_packet, dchid, sizeof(struct dchid_hdr)); ++} ++ ++static int dockchannel_hid_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct dockchannel_hid *dchid; ++ struct device_node *child, *helper; ++ struct platform_device *helper_pdev; ++ struct property *prop; ++ int ret; ++ ++ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); ++ if (ret) ++ return ret; ++ ++ dchid = devm_kzalloc(dev, sizeof(*dchid), GFP_KERNEL); ++ if (!dchid) { ++ return -ENOMEM; ++ } ++ ++ dchid->dev = dev; ++ ++ /* ++ * First make sure all the GPIOs are available, in case we need to defer. ++ * This is necessary because MTP will request them by name later, and by then ++ * it's too late to defer the probe.
++ */ ++ ++ for_each_child_of_node(dev->of_node, child) { ++ for_each_property_of_node(child, prop) { ++ size_t len = strlen(prop->name); ++ struct gpio_desc *gpio; ++ ++ if (len < 12 || strncmp("apple,", prop->name, 6) || ++ strcmp("-gpios", prop->name + len - 6)) ++ continue; ++ ++ gpio = fwnode_gpiod_get_index(&child->fwnode, prop->name, 0, GPIOD_ASIS, ++ prop->name); ++ if (IS_ERR_OR_NULL(gpio)) { ++ if (PTR_ERR(gpio) == -EPROBE_DEFER) { ++ of_node_put(child); ++ return -EPROBE_DEFER; ++ } ++ } else { ++ gpiod_put(gpio); ++ } ++ } ++ } ++ ++ /* ++ * Make sure we also have the MTP coprocessor available, and ++ * defer probe if the helper hasn't probed yet. ++ */ ++ helper = of_parse_phandle(dev->of_node, "apple,helper-cpu", 0); ++ if (!helper) { ++ dev_err(dev, "Missing apple,helper-cpu property"); ++ return -EINVAL; ++ } ++ ++ helper_pdev = of_find_device_by_node(helper); ++ of_node_put(helper); ++ if (!helper_pdev) { ++ dev_err(dev, "Failed to find helper device"); ++ return -EINVAL; ++ } ++ ++ dchid->helper_link = device_link_add(dev, &helper_pdev->dev, ++ DL_FLAG_AUTOREMOVE_CONSUMER); ++ put_device(&helper_pdev->dev); ++ if (!dchid->helper_link) { ++ dev_err(dev, "Failed to link to helper device"); ++ return -EINVAL; ++ } ++ ++ if (dchid->helper_link->supplier->links.status != DL_DEV_DRIVER_BOUND) ++ return -EPROBE_DEFER; ++ ++ /* Now it is safe to begin initializing */ ++ dchid->dc = dockchannel_init(pdev); ++ if (IS_ERR_OR_NULL(dchid->dc)) { ++ return PTR_ERR(dchid->dc); ++ } ++ dchid->new_iface_wq = alloc_workqueue("dchid-new", WQ_MEM_RECLAIM, 0); ++ if (!dchid->new_iface_wq) ++ return -ENOMEM; ++ ++ dchid->comm = dchid_get_interface(dchid, IFACE_COMM, "comm"); ++ if (!dchid->comm) { ++ dev_err(dchid->dev, "Failed to initialize comm interface"); ++ return -EIO; ++ } ++ ++ dev_info(dchid->dev, "Initialized, awaiting packets\n"); ++ dockchannel_await(dchid->dc, dchid_handle_packet, dchid, sizeof(struct dchid_hdr)); ++ ++ return 0; ++} ++ ++static void 
dockchannel_hid_remove(struct platform_device *pdev) ++{ ++ BUG_ON(1); ++} ++ ++static const struct of_device_id dockchannel_hid_of_match[] = { ++ { .compatible = "apple,dockchannel-hid" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, dockchannel_hid_of_match); ++MODULE_FIRMWARE("apple/tpmtfw-*.bin"); ++ ++static struct platform_driver dockchannel_hid_driver = { ++ .driver = { ++ .name = "dockchannel-hid", ++ .of_match_table = dockchannel_hid_of_match, ++ }, ++ .probe = dockchannel_hid_probe, ++ .remove = dockchannel_hid_remove, ++}; ++module_platform_driver(dockchannel_hid_driver); ++ ++MODULE_DESCRIPTION("Apple DockChannel HID transport driver"); ++MODULE_AUTHOR("Hector Martin "); ++MODULE_LICENSE("Dual MIT/GPL"); +-- +Armbian + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Hector Martin +Date: Sun, 3 Jul 2022 23:33:37 +0900 +Subject: soc: apple: Add RTKit helper driver + +This driver can be used for coprocessors that do some background task or +communicate out-of-band, and do not do any mailbox I/O beyond the +standard RTKit initialization. + +Signed-off-by: Hector Martin +--- + drivers/soc/apple/Kconfig | 14 + + drivers/soc/apple/Makefile | 3 + + drivers/soc/apple/rtkit-helper.c | 151 ++++++++++ + 3 files changed, 168 insertions(+) + +diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig +index 111111111111..222222222222 100644 +--- a/drivers/soc/apple/Kconfig ++++ b/drivers/soc/apple/Kconfig +@@ -38,6 +38,20 @@ config APPLE_RTKIT + + Say 'y' here if you have an Apple SoC. + ++config APPLE_RTKIT_HELPER ++ tristate "Apple Generic RTKit helper co-processor" ++ depends on APPLE_RTKIT ++ depends on ARCH_APPLE || COMPILE_TEST ++ default ARCH_APPLE ++ help ++ Apple SoCs such as the M1 come with various co-processors running ++ their proprietary RTKit operating system. This option enables support ++ for a generic co-processor that does not implement any additional ++ in-band communications. 
It can be used for testing purposes, or for ++ coprocessors such as MTP that communicate over a different interface. ++ ++ Say 'y' here if you have an Apple SoC. ++ + config APPLE_SART + tristate "Apple SART DMA address filter" + depends on ARCH_APPLE || COMPILE_TEST +diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile +index 111111111111..222222222222 100644 +--- a/drivers/soc/apple/Makefile ++++ b/drivers/soc/apple/Makefile +@@ -9,5 +9,8 @@ apple-mailbox-y = mailbox.o + obj-$(CONFIG_APPLE_RTKIT) += apple-rtkit.o + apple-rtkit-y = rtkit.o rtkit-crashlog.o + ++obj-$(CONFIG_APPLE_RTKIT_HELPER) += apple-rtkit-helper.o ++apple-rtkit-helper-y = rtkit-helper.o ++ + obj-$(CONFIG_APPLE_SART) += apple-sart.o + apple-sart-y = sart.o +diff --git a/drivers/soc/apple/rtkit-helper.c b/drivers/soc/apple/rtkit-helper.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/soc/apple/rtkit-helper.c +@@ -0,0 +1,151 @@ ++// SPDX-License-Identifier: GPL-2.0-only OR MIT ++/* ++ * Apple Generic RTKit helper coprocessor ++ * Copyright The Asahi Linux Contributors ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define APPLE_ASC_CPU_CONTROL 0x44 ++#define APPLE_ASC_CPU_CONTROL_RUN BIT(4) ++ ++struct apple_rtkit_helper { ++ struct device *dev; ++ struct apple_rtkit *rtk; ++ ++ void __iomem *asc_base; ++ ++ struct resource *sram; ++ void __iomem *sram_base; ++}; ++ ++static int apple_rtkit_helper_shmem_setup(void *cookie, struct apple_rtkit_shmem *bfr) ++{ ++ struct apple_rtkit_helper *helper = cookie; ++ struct resource res = { ++ .start = bfr->iova, ++ .end = bfr->iova + bfr->size - 1, ++ .name = "rtkit_map", ++ }; ++ ++ if (!bfr->iova) { ++ bfr->buffer = dma_alloc_coherent(helper->dev, bfr->size, ++ &bfr->iova, GFP_KERNEL); ++ if (!bfr->buffer) ++ return -ENOMEM; ++ return 0; ++ } ++ ++ if (!helper->sram) { ++ dev_err(helper->dev, ++ "RTKit buffer request with no SRAM region: %pR", 
&res); ++ return -EFAULT; ++ } ++ ++ res.flags = helper->sram->flags; ++ ++ if (res.end < res.start || !resource_contains(helper->sram, &res)) { ++ dev_err(helper->dev, ++ "RTKit buffer request outside SRAM region: %pR", &res); ++ return -EFAULT; ++ } ++ ++ bfr->iomem = helper->sram_base + (res.start - helper->sram->start); ++ bfr->is_mapped = true; ++ ++ return 0; ++} ++ ++static void apple_rtkit_helper_shmem_destroy(void *cookie, struct apple_rtkit_shmem *bfr) ++{ ++ // no-op ++} ++ ++static const struct apple_rtkit_ops apple_rtkit_helper_ops = { ++ .shmem_setup = apple_rtkit_helper_shmem_setup, ++ .shmem_destroy = apple_rtkit_helper_shmem_destroy, ++}; ++ ++static int apple_rtkit_helper_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct apple_rtkit_helper *helper; ++ int ret; ++ ++ /* 44 bits for addresses in standard RTKit requests */ ++ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); ++ if (ret) ++ return ret; ++ ++ helper = devm_kzalloc(dev, sizeof(*helper), GFP_KERNEL); ++ if (!helper) ++ return -ENOMEM; ++ ++ helper->dev = dev; ++ platform_set_drvdata(pdev, helper); ++ ++ helper->asc_base = devm_platform_ioremap_resource_byname(pdev, "asc"); ++ if (IS_ERR(helper->asc_base)) ++ return PTR_ERR(helper->asc_base); ++ ++ helper->sram = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); ++ if (helper->sram) { ++ helper->sram_base = devm_ioremap_resource(dev, helper->sram); ++ if (IS_ERR(helper->sram_base)) ++ return dev_err_probe(dev, PTR_ERR(helper->sram_base), ++ "Failed to map SRAM region"); ++ } ++ ++ helper->rtk = ++ devm_apple_rtkit_init(dev, helper, NULL, 0, &apple_rtkit_helper_ops); ++ if (IS_ERR(helper->rtk)) ++ return dev_err_probe(dev, PTR_ERR(helper->rtk), ++ "Failed to intialize RTKit"); ++ ++ writel_relaxed(APPLE_ASC_CPU_CONTROL_RUN, ++ helper->asc_base + APPLE_ASC_CPU_CONTROL); ++ ++ /* Works for both wake and boot */ ++ ret = apple_rtkit_wake(helper->rtk); ++ if (ret != 0) ++ return 
dev_err_probe(dev, ret, "Failed to wake up coprocessor"); ++ ++ return 0; ++} ++ ++static void apple_rtkit_helper_remove(struct platform_device *pdev) ++{ ++ struct apple_rtkit_helper *helper = platform_get_drvdata(pdev); ++ ++ if (apple_rtkit_is_running(helper->rtk)) ++ apple_rtkit_quiesce(helper->rtk); ++ ++ writel_relaxed(0, helper->asc_base + APPLE_ASC_CPU_CONTROL); ++} ++ ++static const struct of_device_id apple_rtkit_helper_of_match[] = { ++ { .compatible = "apple,rtk-helper-asc4" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, apple_rtkit_helper_of_match); ++ ++static struct platform_driver apple_rtkit_helper_driver = { ++ .driver = { ++ .name = "rtkit-helper", ++ .of_match_table = apple_rtkit_helper_of_match, ++ }, ++ .probe = apple_rtkit_helper_probe, ++ .remove = apple_rtkit_helper_remove, ++}; ++module_platform_driver(apple_rtkit_helper_driver); ++ ++MODULE_AUTHOR("Hector Martin "); ++MODULE_LICENSE("Dual MIT/GPL"); ++MODULE_DESCRIPTION("Apple RTKit helper driver"); +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/4003-HID-apple-ignore-the-trackpad-on-T2-Macs.patch b/patch/kernel/archive/uefi-x86-6.19/4003-HID-apple-ignore-the-trackpad-on-T2-Macs.patch new file mode 100644 index 000000000000..343ee836edcb --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/4003-HID-apple-ignore-the-trackpad-on-T2-Macs.patch @@ -0,0 +1,81 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Aditya Garg +Date: Fri, 12 Sep 2025 12:09:01 +0000 +Subject: HID: apple: ignore the trackpad on T2 Macs + +In order to manage the trackpad on T2 Macs by hid-magicmouse driver +we need to ensure that it is not bound by the hid-apple driver. Use +the existing APPLE_IGNORE_MOUSE quirk for the same. 
+ +Signed-off-by: Aditya Garg +--- + drivers/hid/hid-apple.c | 27 ++++++---- + 1 file changed, 17 insertions(+), 10 deletions(-) + +diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -30,7 +30,7 @@ + #include "hid-ids.h" + + #define APPLE_RDESC_JIS BIT(0) +-/* BIT(1) reserved, was: APPLE_IGNORE_MOUSE */ ++#define APPLE_IGNORE_MOUSE BIT(1) + #define APPLE_HAS_FN BIT(2) + /* BIT(3) reserved, was: APPLE_HIDDEV */ + #define APPLE_ISO_TILDE_QUIRK BIT(4) +@@ -952,6 +952,9 @@ static int apple_probe(struct hid_device *hdev, + hdev->type != HID_TYPE_SPI_KEYBOARD) + return -ENODEV; + ++ if (quirks & APPLE_IGNORE_MOUSE && hdev->type == HID_TYPE_USBMOUSE) ++ return -ENODEV; ++ + asc = devm_kzalloc(&hdev->dev, sizeof(*asc), GFP_KERNEL); + if (asc == NULL) { + hid_err(hdev, "can't alloc apple descriptor\n"); +@@ -1174,27 +1177,31 @@ static const struct hid_device_id apple_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS), + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K), +- .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK }, ++ .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK | ++ APPLE_IGNORE_MOUSE }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132), + .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK | +- APPLE_DISABLE_FKEYS }, ++ APPLE_DISABLE_FKEYS | APPLE_IGNORE_MOUSE }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680), + .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK | +- APPLE_DISABLE_FKEYS }, ++ APPLE_DISABLE_FKEYS | APPLE_IGNORE_MOUSE }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680_ALT), + .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | 
APPLE_ISO_TILDE_QUIRK | +- APPLE_DISABLE_FKEYS }, ++ APPLE_DISABLE_FKEYS | APPLE_IGNORE_MOUSE }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213), + .driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK | +- APPLE_DISABLE_FKEYS }, ++ APPLE_DISABLE_FKEYS | APPLE_IGNORE_MOUSE }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K), +- .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_DISABLE_FKEYS }, ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_DISABLE_FKEYS | ++ APPLE_IGNORE_MOUSE }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223), +- .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_DISABLE_FKEYS }, ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_DISABLE_FKEYS | ++ APPLE_IGNORE_MOUSE }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K), +- .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_IGNORE_MOUSE }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F), +- .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_DISABLE_FKEYS }, ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_DISABLE_FKEYS | ++ APPLE_IGNORE_MOUSE }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), + .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/4004-HID-magicmouse-Add-support-for-trackpads-found-on-T2.patch b/patch/kernel/archive/uefi-x86-6.19/4004-HID-magicmouse-Add-support-for-trackpads-found-on-T2.patch new file mode 100644 index 000000000000..1ce0e2d7679e --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/4004-HID-magicmouse-Add-support-for-trackpads-found-on-T2.patch @@ -0,0 +1,417 @@ 
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Aditya Garg +Date: Tue, 11 Mar 2025 18:44:06 +0530 +Subject: HID: magicmouse: Add support for trackpads found on T2 Macs + +This patch adds support for trackpads found on Macs with the T2 +Security Chip. The touch report format differs from other trackpads. +It is the same format as type 4 in bcm5974.c + +Signed-off-by: Aditya Garg +--- + drivers/hid/hid-magicmouse.c | 296 +++++++++- + 1 file changed, 275 insertions(+), 21 deletions(-) + +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -118,6 +118,105 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie + #define TRACKPAD2_RES_Y \ + ((TRACKPAD2_MAX_Y - TRACKPAD2_MIN_Y) / (TRACKPAD2_DIMENSION_Y / 100)) + ++#define J140K_TP_DIMENSION_X (float)12100 ++#define J140K_TP_MIN_X -5318 ++#define J140K_TP_MAX_X 5787 ++#define J140K_TP_RES_X \ ++ ((J140K_TP_MAX_X - J140K_TP_MIN_X) / (J140K_TP_DIMENSION_X / 100)) ++#define J140K_TP_DIMENSION_Y (float)8200 ++#define J140K_TP_MIN_Y -157 ++#define J140K_TP_MAX_Y 7102 ++#define J140K_TP_RES_Y \ ++ ((J140K_TP_MAX_Y - J140K_TP_MIN_Y) / (J140K_TP_DIMENSION_Y / 100)) ++ ++#define J132_TP_DIMENSION_X (float)13500 ++#define J132_TP_MIN_X -6243 ++#define J132_TP_MAX_X 6749 ++#define J132_TP_RES_X \ ++ ((J132_TP_MAX_X - J132_TP_MIN_X) / (J132_TP_DIMENSION_X / 100)) ++#define J132_TP_DIMENSION_Y (float)8400 ++#define J132_TP_MIN_Y -170 ++#define J132_TP_MAX_Y 7685 ++#define J132_TP_RES_Y \ ++ ((J132_TP_MAX_Y - J132_TP_MIN_Y) / (J132_TP_DIMENSION_Y / 100)) ++ ++#define J680_TP_DIMENSION_X (float)16000 ++#define J680_TP_MIN_X -7456 ++#define J680_TP_MAX_X 7976 ++#define J680_TP_RES_X \ ++ ((J680_TP_MAX_X - J680_TP_MIN_X) / (J680_TP_DIMENSION_X / 100)) ++#define J680_TP_DIMENSION_Y (float)10000 ++#define J680_TP_MIN_Y -163 ++#define 
J680_TP_MAX_Y 9283 ++#define J680_TP_RES_Y \ ++ ((J680_TP_MAX_Y - J680_TP_MIN_Y) / (J680_TP_DIMENSION_Y / 100)) ++ ++#define J680_ALT_TP_DIMENSION_X (float)16000 ++#define J680_ALT_TP_MIN_X -7456 ++#define J680_ALT_TP_MAX_X 7976 ++#define J680_ALT_TP_RES_X \ ++ ((J680_ALT_TP_MAX_X - J680_ALT_TP_MIN_X) / (J680_ALT_TP_DIMENSION_X / 100)) ++#define J680_ALT_TP_DIMENSION_Y (float)10000 ++#define J680_ALT_TP_MIN_Y -163 ++#define J680_ALT_TP_MAX_Y 9283 ++#define J680_ALT_TP_RES_Y \ ++ ((J680_ALT_TP_MAX_Y - J680_ALT_TP_MIN_Y) / (J680_ALT_TP_DIMENSION_Y / 100)) ++ ++#define J213_TP_DIMENSION_X (float)13500 ++#define J213_TP_MIN_X -6243 ++#define J213_TP_MAX_X 6749 ++#define J213_TP_RES_X \ ++ ((J213_TP_MAX_X - J213_TP_MIN_X) / (J213_TP_DIMENSION_X / 100)) ++#define J213_TP_DIMENSION_Y (float)8400 ++#define J213_TP_MIN_Y -170 ++#define J213_TP_MAX_Y 7685 ++#define J213_TP_RES_Y \ ++ ((J213_TP_MAX_Y - J213_TP_MIN_Y) / (J213_TP_DIMENSION_Y / 100)) ++ ++#define J214K_TP_DIMENSION_X (float)13200 ++#define J214K_TP_MIN_X -6046 ++#define J214K_TP_MAX_X 6536 ++#define J214K_TP_RES_X \ ++ ((J214K_TP_MAX_X - J214K_TP_MIN_X) / (J214K_TP_DIMENSION_X / 100)) ++#define J214K_TP_DIMENSION_Y (float)8200 ++#define J214K_TP_MIN_Y -164 ++#define J214K_TP_MAX_Y 7439 ++#define J214K_TP_RES_Y \ ++ ((J214K_TP_MAX_Y - J214K_TP_MIN_Y) / (J214K_TP_DIMENSION_Y / 100)) ++ ++#define J223_TP_DIMENSION_X (float)13200 ++#define J223_TP_MIN_X -6046 ++#define J223_TP_MAX_X 6536 ++#define J223_TP_RES_X \ ++ ((J223_TP_MAX_X - J223_TP_MIN_X) / (J223_TP_DIMENSION_X / 100)) ++#define J223_TP_DIMENSION_Y (float)8200 ++#define J223_TP_MIN_Y -164 ++#define J223_TP_MAX_Y 7439 ++#define J223_TP_RES_Y \ ++ ((J223_TP_MAX_Y - J223_TP_MIN_Y) / (J223_TP_DIMENSION_Y / 100)) ++ ++#define J230K_TP_DIMENSION_X (float)12100 ++#define J230K_TP_MIN_X -5318 ++#define J230K_TP_MAX_X 5787 ++#define J230K_TP_RES_X \ ++ ((J230K_TP_MAX_X - J230K_TP_MIN_X) / (J230K_TP_DIMENSION_X / 100)) ++#define J230K_TP_DIMENSION_Y (float)8200 
++#define J230K_TP_MIN_Y -157 ++#define J230K_TP_MAX_Y 7102 ++#define J230K_TP_RES_Y \ ++ ((J230K_TP_MAX_Y - J230K_TP_MIN_Y) / (J230K_TP_DIMENSION_Y / 100)) ++ ++#define J152F_TP_DIMENSION_X (float)16000 ++#define J152F_TP_MIN_X -7456 ++#define J152F_TP_MAX_X 7976 ++#define J152F_TP_RES_X \ ++ ((J152F_TP_MAX_X - J152F_TP_MIN_X) / (J152F_TP_DIMENSION_X / 100)) ++#define J152F_TP_DIMENSION_Y (float)10000 ++#define J152F_TP_MIN_Y -163 ++#define J152F_TP_MAX_Y 9283 ++#define J152F_TP_RES_Y \ ++ ((J152F_TP_MAX_Y - J152F_TP_MIN_Y) / (J152F_TP_DIMENSION_Y / 100)) ++ + /* These are fallback values, since the real values will be queried from the device. */ + #define J314_TP_DIMENSION_X (float)13000 + #define J314_TP_MIN_X -5900 +@@ -130,7 +229,11 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie + #define J314_TP_RES_Y \ + ((J314_TP_MAX_Y - J314_TP_MIN_Y) / (J314_TP_DIMENSION_Y / 100)) + +-#define J314_TP_MAX_FINGER_ORIENTATION 16384 ++#define T2_TOUCHPAD_ENTRY(model) \ ++ { USB_DEVICE_ID_APPLE_WELLSPRINGT2_##model, model##_TP_MIN_X, model##_TP_MIN_Y, \ ++model##_TP_MAX_X, model##_TP_MAX_Y, model##_TP_RES_X, model##_TP_RES_Y } ++ ++#define INTERNAL_TP_MAX_FINGER_ORIENTATION 16384 + + struct magicmouse_input_ops { + int (*raw_event)(struct hid_device *hdev, +@@ -733,7 +836,7 @@ static void report_finger_data(struct input_dev *input, int slot, + input_report_abs(input, ABS_MT_WIDTH_MINOR, + le16_to_int(f->tool_minor) << 1); + input_report_abs(input, ABS_MT_ORIENTATION, +- J314_TP_MAX_FINGER_ORIENTATION - le16_to_int(f->orientation)); ++ INTERNAL_TP_MAX_FINGER_ORIENTATION - le16_to_int(f->orientation)); + input_report_abs(input, ABS_MT_PRESSURE, le16_to_int(f->pressure)); + input_report_abs(input, ABS_MT_POSITION_X, pos->x); + input_report_abs(input, ABS_MT_POSITION_Y, pos->y); +@@ -821,6 +924,20 @@ static int magicmouse_raw_event_spi(struct hid_device *hdev, + return magicmouse_raw_event_mtp(hdev, report, data + hdr_sz, size - hdr_sz); + } + 
++static int magicmouse_raw_event_t2(struct hid_device *hdev, ++ struct hid_report *report, u8 *data, int size) ++{ ++ const size_t hdr_sz = sizeof(struct tp_mouse_report); ++ ++ if (!size) ++ return 0; ++ ++ if (data[0] != TRACKPAD2_USB_REPORT_ID || size < hdr_sz) ++ return 0; ++ ++ return magicmouse_raw_event_mtp(hdev, report, data + hdr_sz, size - hdr_sz); ++} ++ + static int magicmouse_event(struct hid_device *hdev, struct hid_field *field, + struct hid_usage *usage, __s32 value) + { +@@ -1018,8 +1135,32 @@ static int magicmouse_setup_input_usb(struct input_dev *input, + return 0; + } + +-static int magicmouse_setup_input_mtp(struct input_dev *input, +- struct hid_device *hdev) ++struct magicmouse_t2_properties { ++ u32 id; ++ int min_x; ++ int min_y; ++ int max_x; ++ int max_y; ++ int res_x; ++ int res_y; ++}; ++ ++static const struct magicmouse_t2_properties magicmouse_t2_configs[] = { ++ T2_TOUCHPAD_ENTRY(J140K), ++ T2_TOUCHPAD_ENTRY(J132), ++ T2_TOUCHPAD_ENTRY(J680), ++ T2_TOUCHPAD_ENTRY(J680_ALT), ++ T2_TOUCHPAD_ENTRY(J213), ++ T2_TOUCHPAD_ENTRY(J214K), ++ T2_TOUCHPAD_ENTRY(J223), ++ T2_TOUCHPAD_ENTRY(J230K), ++ T2_TOUCHPAD_ENTRY(J152F), ++}; ++ ++static int magicmouse_setup_input_int_tpd(struct input_dev *input, ++ struct hid_device *hdev, int min_x, int min_y, ++ int max_x, int max_y, int res_x, int res_y, ++ bool query_dimensions) + { + int error; + int mt_flags = 0; +@@ -1060,19 +1201,17 @@ static int magicmouse_setup_input_mtp(struct input_dev *input, + input_abs_set_res(input, ABS_MT_PRESSURE, 1); + + /* finger orientation */ +- input_set_abs_params(input, ABS_MT_ORIENTATION, -J314_TP_MAX_FINGER_ORIENTATION, +- J314_TP_MAX_FINGER_ORIENTATION, 0, 0); ++ input_set_abs_params(input, ABS_MT_ORIENTATION, -INTERNAL_TP_MAX_FINGER_ORIENTATION, ++ INTERNAL_TP_MAX_FINGER_ORIENTATION, 0, 0); + + /* finger position */ +- input_set_abs_params(input, ABS_MT_POSITION_X, J314_TP_MIN_X, J314_TP_MAX_X, +- 0, 0); ++ input_set_abs_params(input, ABS_MT_POSITION_X, min_x, 
max_x, 0, 0); + /* Y axis is inverted */ +- input_set_abs_params(input, ABS_MT_POSITION_Y, -J314_TP_MAX_Y, -J314_TP_MIN_Y, +- 0, 0); ++ input_set_abs_params(input, ABS_MT_POSITION_Y, -max_y, -min_y, 0, 0); + + /* X/Y resolution */ +- input_abs_set_res(input, ABS_MT_POSITION_X, J314_TP_RES_X); +- input_abs_set_res(input, ABS_MT_POSITION_Y, J314_TP_RES_Y); ++ input_abs_set_res(input, ABS_MT_POSITION_X, res_x); ++ input_abs_set_res(input, ABS_MT_POSITION_Y, res_y); + + input_set_events_per_packet(input, 60); + +@@ -1099,7 +1238,20 @@ static int magicmouse_setup_input_mtp(struct input_dev *input, + */ + input->open = magicmouse_open; + input->close = magicmouse_close; +- msc->query_dimensions = true; ++ msc->query_dimensions = query_dimensions; ++ ++ return 0; ++} ++ ++static int magicmouse_setup_input_mtp(struct input_dev *input, ++ struct hid_device *hdev) ++{ ++ int ret = magicmouse_setup_input_int_tpd(input, hdev, J314_TP_MIN_X, ++ J314_TP_MIN_Y, J314_TP_MAX_X, ++ J314_TP_MAX_Y, J314_TP_RES_X, ++ J314_TP_RES_Y, true); ++ if (ret) ++ return ret; + + return 0; + } +@@ -1107,7 +1259,34 @@ static int magicmouse_setup_input_mtp(struct input_dev *input, + static int magicmouse_setup_input_spi(struct input_dev *input, + struct hid_device *hdev) + { +- int ret = magicmouse_setup_input_mtp(input, hdev); ++ int ret = magicmouse_setup_input_int_tpd(input, hdev, J314_TP_MIN_X, ++ J314_TP_MIN_Y, J314_TP_MAX_X, ++ J314_TP_MAX_Y, J314_TP_RES_X, ++ J314_TP_RES_Y, true); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static int magicmouse_setup_input_t2(struct input_dev *input, ++ struct hid_device *hdev) ++{ ++ int min_x, min_y, max_x, max_y, res_x, res_y; ++ ++ for (size_t i = 0; i < ARRAY_SIZE(magicmouse_t2_configs); i++) { ++ if (magicmouse_t2_configs[i].id == hdev->product) { ++ min_x = magicmouse_t2_configs[i].min_x; ++ min_y = magicmouse_t2_configs[i].min_y; ++ max_x = magicmouse_t2_configs[i].max_x; ++ max_y = magicmouse_t2_configs[i].max_y; ++ res_x = 
magicmouse_t2_configs[i].res_x; ++ res_y = magicmouse_t2_configs[i].res_y; ++ } ++ } ++ ++ int ret = magicmouse_setup_input_int_tpd(input, hdev, min_x, min_y, ++ max_x, max_y, res_x, res_y, false); + if (ret) + return ret; + +@@ -1180,6 +1359,18 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev) + feature = feature_mt_trackpad2_usb; + } + break; ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680_ALT: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F: ++ feature_size = sizeof(feature_mt_trackpad2_usb); ++ feature = feature_mt_trackpad2_usb; ++ break; + case USB_DEVICE_ID_APPLE_MAGICMOUSE2: + case USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC: + feature_size = sizeof(feature_mt_mouse2); +@@ -1273,8 +1464,23 @@ static int magicmouse_probe(struct hid_device *hdev, + int ret; + + if ((id->bus == BUS_SPI || id->bus == BUS_HOST) && id->vendor == SPI_VENDOR_ID_APPLE && +- hdev->type != HID_TYPE_SPI_MOUSE) +- return -ENODEV; ++ hdev->type != HID_TYPE_SPI_MOUSE) ++ return -ENODEV; ++ ++ switch (id->product) { ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680_ALT: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F: ++ if (hdev->type != HID_TYPE_USBMOUSE) ++ return -ENODEV; ++ break; ++ } + + msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL); + if (msc == NULL) { +@@ -1284,15 +1490,33 @@ static int magicmouse_probe(struct 
hid_device *hdev, + + // internal trackpad use a data format use input ops to avoid + // conflicts with the report ID. +- if (id->bus == BUS_HOST) { ++ switch (id->bus) { ++ case BUS_HOST: + msc->input_ops.raw_event = magicmouse_raw_event_mtp; + msc->input_ops.setup_input = magicmouse_setup_input_mtp; +- } else if (id->bus == BUS_SPI) { ++ break; ++ case BUS_SPI: + msc->input_ops.raw_event = magicmouse_raw_event_spi; + msc->input_ops.setup_input = magicmouse_setup_input_spi; +- } else { +- msc->input_ops.raw_event = magicmouse_raw_event_usb; +- msc->input_ops.setup_input = magicmouse_setup_input_usb; ++ break; ++ default: ++ switch (id->product) { ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680_ALT: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F: ++ msc->input_ops.raw_event = magicmouse_raw_event_t2; ++ msc->input_ops.setup_input = magicmouse_setup_input_t2; ++ break; ++ default: ++ msc->input_ops.raw_event = magicmouse_raw_event_usb; ++ msc->input_ops.setup_input = magicmouse_setup_input_usb; ++ } + } + + msc->scroll_accel = SCROLL_ACCEL_DEFAULT; +@@ -1353,6 +1577,18 @@ static int magicmouse_probe(struct hid_device *hdev, + TRACKPAD2_USB_REPORT_ID, 0); + } + break; ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680_ALT: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K: ++ case USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F: ++ report = hid_register_report(hdev, 
HID_INPUT_REPORT, ++ TRACKPAD2_USB_REPORT_ID, 0); ++ break; + case HID_ANY_ID: + switch (id->bus) { + case BUS_HOST: +@@ -1464,6 +1700,24 @@ static const struct hid_device_id magic_mice[] = { + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K), .driver_data = 0 }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132), .driver_data = 0 }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680), .driver_data = 0 }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680_ALT), .driver_data = 0 }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213), .driver_data = 0 }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K), .driver_data = 0 }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223), .driver_data = 0 }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K), .driver_data = 0 }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F), .driver_data = 0 }, + { HID_SPI_DEVICE(SPI_VENDOR_ID_APPLE, HID_ANY_ID), + .driver_data = 0 }, + { HID_DEVICE(BUS_HOST, HID_GROUP_ANY, HOST_VENDOR_ID_APPLE, +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/4005-HID-magicmouse-fix-regression-breaking-support-for-M.patch b/patch/kernel/archive/uefi-x86-6.19/4005-HID-magicmouse-fix-regression-breaking-support-for-M.patch new file mode 100644 index 000000000000..26a59fbfda76 --- /dev/null +++ b/patch/kernel/archive/uefi-x86-6.19/4005-HID-magicmouse-fix-regression-breaking-support-for-M.patch @@ -0,0 +1,53 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Aditya Garg +Date: Wed, 8 Oct 2025 01:48:18 +0000 +Subject: 
HID: magicmouse: fix regression breaking support for Magic Trackpad 1 + +The case HID_ANY_ID and default are technically the same, but the first +one was assigning no report to the Magic Trackpad 1, while the second +one assigns the correct report. Since the first case is matched first, +the Magic Trackpad 1 was not being assigned any report, breaking +support for it. + +Signed-off-by: Aditya Garg +--- + drivers/hid/hid-magicmouse.c | 15 ++++------ + 1 file changed, 6 insertions(+), 9 deletions(-) + +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 111111111111..222222222222 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -1589,7 +1589,7 @@ static int magicmouse_probe(struct hid_device *hdev, + report = hid_register_report(hdev, HID_INPUT_REPORT, + TRACKPAD2_USB_REPORT_ID, 0); + break; +- case HID_ANY_ID: ++ default: + switch (id->bus) { + case BUS_HOST: + report = hid_register_report(hdev, HID_INPUT_REPORT, MTP_REPORT_ID, 0); +@@ -1597,15 +1597,12 @@ static int magicmouse_probe(struct hid_device *hdev, + case BUS_SPI: + report = hid_register_report(hdev, HID_INPUT_REPORT, SPI_REPORT_ID, 0); + break; +- default: +- break; ++ default: /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ ++ report = hid_register_report(hdev, HID_INPUT_REPORT, ++ TRACKPAD_REPORT_ID, 0); ++ report = hid_register_report(hdev, HID_INPUT_REPORT, ++ DOUBLE_REPORT_ID, 0); + } +- break; +- default: /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ +- report = hid_register_report(hdev, HID_INPUT_REPORT, +- TRACKPAD_REPORT_ID, 0); +- report = hid_register_report(hdev, HID_INPUT_REPORT, +- DOUBLE_REPORT_ID, 0); + } + + if (!report) { +-- +Armbian + diff --git a/patch/kernel/archive/uefi-x86-6.19/7001-drm-i915-fbdev-Discard-BIOS-framebuffers-exceeding-h.patch b/patch/kernel/archive/uefi-x86-6.19/7001-drm-i915-fbdev-Discard-BIOS-framebuffers-exceeding-h.patch new file mode 100644 index 000000000000..d4b01d7bb819 --- /dev/null +++ 
b/patch/kernel/archive/uefi-x86-6.19/7001-drm-i915-fbdev-Discard-BIOS-framebuffers-exceeding-h.patch @@ -0,0 +1,34 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Ashish Arora +Date: Sat, 8 Jan 2022 21:43:18 +1100 +Subject: drm/i915: Discard large BIOS framebuffers causing display corruption + +On certain 4k panels, the BIOS framebuffer is larger than what panel +requires causing display corruption. Introduce a check for the same. + +Signed-off-by: Ashish Arora +--- + drivers/gpu/drm/i915/display/intel_fbdev.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c +index 111111111111..222222222222 100644 +--- a/drivers/gpu/drm/i915/display/intel_fbdev.c ++++ b/drivers/gpu/drm/i915/display/intel_fbdev.c +@@ -278,10 +278,10 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + ifbdev->fb = NULL; + + if (fb && +- (sizes->fb_width > fb->base.width || +- sizes->fb_height > fb->base.height)) { ++ (sizes->fb_width != fb->base.width || ++ sizes->fb_height != fb->base.height)) { + drm_dbg_kms(display->drm, +- "BIOS fb too small (%dx%d), we require (%dx%d)," ++ "BIOS fb not valid (%dx%d), we require (%dx%d)," + " releasing it\n", + fb->base.width, fb->base.height, + sizes->fb_width, sizes->fb_height); +-- +Armbian + diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1666-Hyper-V-ARM64-Always-use-the-Hyper-V-hypercall-interface.patch b/patch/kernel/archive/wsl2-arm64-6.1/1666-Hyper-V-ARM64-Always-use-the-Hyper-V-hypercall-interface.patch deleted file mode 100644 index 6e573327fdab..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1666-Hyper-V-ARM64-Always-use-the-Hyper-V-hypercall-interface.patch +++ /dev/null @@ -1,239 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Sunil Muthuswamy -Date: Mon, 3 May 2021 14:17:52 -0700 -Subject: Hyper-V: ARM64: Always use the Hyper-V 
hypercall interface - -This patch forces the use of the Hyper-V hypercall interface, -instead of the architectural SMCCC interface on ARM64 because -not all versions of Windows support the SMCCC interface. All -versions of Windows will support the Hyper-V hypercall interface, -so this change should be both forward and backward compatible. - -Signed-off-by: Sunil Muthuswamy - -[tyhicks: Forward ported to v5.15] -Signed-off-by: Tyler Hicks -[kms: Forward ported to v6.1] -Signed-off-by: Kelsey Steele ---- - arch/arm64/hyperv/Makefile | 2 +- - arch/arm64/hyperv/hv_core.c | 57 ++++----- - arch/arm64/hyperv/hv_hvc.S | 61 ++++++++++ - arch/arm64/include/asm/mshyperv.h | 4 + - 4 files changed, 91 insertions(+), 33 deletions(-) - -diff --git a/arch/arm64/hyperv/Makefile b/arch/arm64/hyperv/Makefile -index 111111111111..222222222222 100644 ---- a/arch/arm64/hyperv/Makefile -+++ b/arch/arm64/hyperv/Makefile -@@ -1,2 +1,2 @@ - # SPDX-License-Identifier: GPL-2.0 --obj-y := hv_core.o mshyperv.o -+obj-y := hv_core.o mshyperv.o hv_hvc.o -diff --git a/arch/arm64/hyperv/hv_core.c b/arch/arm64/hyperv/hv_core.c -index 111111111111..222222222222 100644 ---- a/arch/arm64/hyperv/hv_core.c -+++ b/arch/arm64/hyperv/hv_core.c -@@ -23,16 +23,13 @@ - */ - u64 hv_do_hypercall(u64 control, void *input, void *output) - { -- struct arm_smccc_res res; - u64 input_address; - u64 output_address; - - input_address = input ? virt_to_phys(input) : 0; - output_address = output ? virt_to_phys(output) : 0; - -- arm_smccc_1_1_hvc(HV_FUNC_ID, control, -- input_address, output_address, &res); -- return res.a0; -+ return hv_do_hvc(control, input_address, output_address); - } - EXPORT_SYMBOL_GPL(hv_do_hypercall); - -@@ -41,27 +38,33 @@ EXPORT_SYMBOL_GPL(hv_do_hypercall); - * with arguments in registers instead of physical memory. - * Avoids the overhead of virt_to_phys for simple hypercalls. 
- */ -- - u64 hv_do_fast_hypercall8(u16 code, u64 input) - { -- struct arm_smccc_res res; - u64 control; - - control = (u64)code | HV_HYPERCALL_FAST_BIT; -- -- arm_smccc_1_1_hvc(HV_FUNC_ID, control, input, &res); -- return res.a0; -+ return hv_do_hvc(control, input); - } - EXPORT_SYMBOL_GPL(hv_do_fast_hypercall8); - -+union hv_hypercall_status { -+ u64 as_uint64; -+ struct { -+ u16 status; -+ u16 reserved; -+ u16 reps_completed; /* Low 12 bits */ -+ u16 reserved2; -+ }; -+}; -+ - /* - * Set a single VP register to a 64-bit value. - */ - void hv_set_vpreg(u32 msr, u64 value) - { -- struct arm_smccc_res res; -+ union hv_hypercall_status status; - -- arm_smccc_1_1_hvc(HV_FUNC_ID, -+ status.as_uint64 = hv_do_hvc( - HVCALL_SET_VP_REGISTERS | HV_HYPERCALL_FAST_BIT | - HV_HYPERCALL_REP_COMP_1, - HV_PARTITION_ID_SELF, -@@ -69,15 +72,14 @@ void hv_set_vpreg(u32 msr, u64 value) - msr, - 0, - value, -- 0, -- &res); -+ 0); - - /* - * Something is fundamentally broken in the hypervisor if - * setting a VP register fails. There's really no way to - * continue as a guest VM, so panic. - */ -- BUG_ON(!hv_result_success(res.a0)); -+ BUG_ON(status.status != HV_STATUS_SUCCESS); - } - EXPORT_SYMBOL_GPL(hv_set_vpreg); - -@@ -90,31 +92,22 @@ EXPORT_SYMBOL_GPL(hv_set_vpreg); - - void hv_get_vpreg_128(u32 msr, struct hv_get_vp_registers_output *result) - { -- struct arm_smccc_1_2_regs args; -- struct arm_smccc_1_2_regs res; -- -- args.a0 = HV_FUNC_ID; -- args.a1 = HVCALL_GET_VP_REGISTERS | HV_HYPERCALL_FAST_BIT | -- HV_HYPERCALL_REP_COMP_1; -- args.a2 = HV_PARTITION_ID_SELF; -- args.a3 = HV_VP_INDEX_SELF; -- args.a4 = msr; -+ u64 status; - -- /* -- * Use the SMCCC 1.2 interface because the results are in registers -- * beyond X0-X3. 
-- */ -- arm_smccc_1_2_hvc(&args, &res); -+ status = hv_do_hvc_fast_get( -+ HVCALL_GET_VP_REGISTERS | HV_HYPERCALL_FAST_BIT | -+ HV_HYPERCALL_REP_COMP_1, -+ HV_PARTITION_ID_SELF, -+ HV_VP_INDEX_SELF, -+ msr, -+ result); - - /* - * Something is fundamentally broken in the hypervisor if - * getting a VP register fails. There's really no way to - * continue as a guest VM, so panic. - */ -- BUG_ON(!hv_result_success(res.a0)); -- -- result->as64.low = res.a6; -- result->as64.high = res.a7; -+ BUG_ON((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS); - } - EXPORT_SYMBOL_GPL(hv_get_vpreg_128); - -diff --git a/arch/arm64/hyperv/hv_hvc.S b/arch/arm64/hyperv/hv_hvc.S -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/arch/arm64/hyperv/hv_hvc.S -@@ -0,0 +1,61 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+/* -+ * Microsoft Hyper-V hypervisor invocation routines -+ * -+ * Copyright (C) 2018, Microsoft, Inc. -+ * -+ * Author : Michael Kelley -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or -+ * NON INFRINGEMENT. See the GNU General Public License for more -+ * details. -+ */ -+ -+#include -+#include -+ -+ .text -+/* -+ * Do the HVC instruction. For Hyper-V the argument is always 1. -+ * x0 contains the hypercall control value, while additional registers -+ * vary depending on the hypercall, and whether the hypercall arguments -+ * are in memory or in registers (a "fast" hypercall per the Hyper-V -+ * TLFS). When the arguments are in memory x1 is the guest physical -+ * address of the input arguments, and x2 is the guest physical -+ * address of the output arguments. 
When the arguments are in -+ * registers, the register values depends on the hypercall. Note -+ * that this version cannot return any values in registers. -+ */ -+SYM_FUNC_START(hv_do_hvc) -+ hvc #1 -+ ret -+SYM_FUNC_END(hv_do_hvc) -+ -+/* -+ * This variant of HVC invocation is for hv_get_vpreg and -+ * hv_get_vpreg_128. The input parameters are passed in registers -+ * along with a pointer in x4 to where the output result should -+ * be stored. The output is returned in x15 and x16. x19 is used as -+ * scratch space to avoid buildng a stack frame, as Hyper-V does -+ * not preserve registers x0-x17. -+ */ -+SYM_FUNC_START(hv_do_hvc_fast_get) -+ /* -+ * Stash away x19 register so that it can be used as a scratch -+ * register and pop it at the end. -+ */ -+ str x19, [sp, #-16]! -+ mov x19, x4 -+ hvc #1 -+ str x15,[x19] -+ str x16,[x19,#8] -+ ldr x19, [sp], #16 -+ ret -+SYM_FUNC_END(hv_do_hvc_fast_get) -diff --git a/arch/arm64/include/asm/mshyperv.h b/arch/arm64/include/asm/mshyperv.h -index 111111111111..222222222222 100644 ---- a/arch/arm64/include/asm/mshyperv.h -+++ b/arch/arm64/include/asm/mshyperv.h -@@ -22,6 +22,10 @@ - #include - #include - -+extern u64 hv_do_hvc(u64 control, ...); -+extern u64 hv_do_hvc_fast_get(u64 control, u64 input1, u64 input2, u64 input3, -+ struct hv_get_vp_registers_output *output); -+ - /* - * Declare calls to get and set Hyper-V VP register values on ARM64, which - * requires a hypercall. 
--- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1667-arm64-hyperv-Enable-Hyper-V-synthetic-clocks-timers.patch b/patch/kernel/archive/wsl2-arm64-6.1/1667-arm64-hyperv-Enable-Hyper-V-synthetic-clocks-timers.patch deleted file mode 100644 index 3b78f86578d4..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1667-arm64-hyperv-Enable-Hyper-V-synthetic-clocks-timers.patch +++ /dev/null @@ -1,185 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Michael Kelley -Date: Mon, 28 Feb 2022 08:41:24 -0800 -Subject: arm64: hyperv: Enable Hyper-V synthetic clocks/timers - -This patch adds support for Hyper-V synthetic clocks and timers on -ARM64. Upstream code assumes changes to Hyper-V that were made -in Fall 2021 that fully virtualize the ARM64 architectural counter -and timer so that the driver in drivers/clocksource/arm_arch_timer.c -can be used. But older versions of Hyper-V don't have this -support and must use the Hyper-V synthetic clocks and timers. -As such, this patch is out-of-tree code. - -This patch does two related things. First it splits the general -Hyper-V initialization code to create hyperv_early_init() that runs -much earlier during kernel boot. This early init function is needed -so that core Hyper-V functionality is ready before the synthetic clocks -and timers are initialized. Second, it adds Hyper-V clock and timer -initialization via TIMER_ACPI_DECLARE() and hyperv_timer_init() -in the Hyper-V clocksource driver in drivers/clocksource/hyperv_timer.c. 
- -Signed-off-by: Michael Kelley -[tyhicks: Forward port around a minor text conflict caused by commit - 245b993d8f6c ("clocksource: hyper-v: unexport __init-annotated - hv_init_clocksource()") -Signed-off-by: Tyler Hicks -[kms: Forward port to 6.1] -Signed-off-by: Kelsey Steele ---- - arch/arm64/hyperv/mshyperv.c | 15 +++++--- - arch/arm64/include/asm/mshyperv.h | 18 ++++++++++ - arch/arm64/kernel/setup.c | 4 +++ - drivers/clocksource/hyperv_timer.c | 14 ++++++++ - drivers/hv/Kconfig | 2 +- - 5 files changed, 47 insertions(+), 6 deletions(-) - -diff --git a/arch/arm64/hyperv/mshyperv.c b/arch/arm64/hyperv/mshyperv.c -index 111111111111..222222222222 100644 ---- a/arch/arm64/hyperv/mshyperv.c -+++ b/arch/arm64/hyperv/mshyperv.c -@@ -19,12 +19,11 @@ - - static bool hyperv_initialized; - --static int __init hyperv_init(void) -+void __init hyperv_early_init(void) - { - struct hv_get_vp_registers_output result; - u32 a, b, c, d; - u64 guest_id; -- int ret; - - /* - * Allow for a kernel built with CONFIG_HYPERV to be running in -@@ -32,10 +31,10 @@ static int __init hyperv_init(void) - * In such cases, do nothing and return success. 
- */ - if (acpi_disabled) -- return 0; -+ return; - - if (strncmp((char *)&acpi_gbl_FADT.hypervisor_id, "MsHyperV", 8)) -- return 0; -+ return; - - /* Setup the guest ID */ - guest_id = hv_generate_guest_id(LINUX_VERSION_CODE); -@@ -63,6 +62,13 @@ static int __init hyperv_init(void) - pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n", - b >> 16, b & 0xFFFF, a, d & 0xFFFFFF, c, d >> 24); - -+ hyperv_initialized = true; -+} -+ -+static int __init hyperv_init(void) -+{ -+ int ret; -+ - ret = hv_common_init(); - if (ret) - return ret; -@@ -74,7 +80,6 @@ static int __init hyperv_init(void) - return ret; - } - -- hyperv_initialized = true; - return 0; - } - -diff --git a/arch/arm64/include/asm/mshyperv.h b/arch/arm64/include/asm/mshyperv.h -index 111111111111..222222222222 100644 ---- a/arch/arm64/include/asm/mshyperv.h -+++ b/arch/arm64/include/asm/mshyperv.h -@@ -21,6 +21,13 @@ - #include - #include - #include -+#include -+ -+#if IS_ENABLED(CONFIG_HYPERV) -+void __init hyperv_early_init(void); -+#else -+static inline void hyperv_early_init(void) {}; -+#endif - - extern u64 hv_do_hvc(u64 control, ...); - extern u64 hv_do_hvc_fast_get(u64 control, u64 input1, u64 input2, u64 input3, -@@ -45,6 +52,17 @@ static inline u64 hv_get_register(unsigned int reg) - return hv_get_vpreg(reg); - } - -+/* Define the interrupt ID used by STIMER0 Direct Mode interrupts. This -+ * value can't come from ACPI tables because it is needed before the -+ * Linux ACPI subsystem is initialized. 
-+ */ -+#define HYPERV_STIMER0_VECTOR 31 -+ -+static inline u64 hv_get_raw_timer(void) -+{ -+ return arch_timer_read_counter(); -+} -+ - /* SMCCC hypercall parameters */ - #define HV_SMCCC_FUNC_NUMBER 1 - #define HV_FUNC_ID ARM_SMCCC_CALL_VAL( \ -diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c -index 111111111111..222222222222 100644 ---- a/arch/arm64/kernel/setup.c -+++ b/arch/arm64/kernel/setup.c -@@ -50,6 +50,7 @@ - #include - #include - #include -+#include - #include - - static int num_standard_resources; -@@ -343,6 +344,9 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) - if (acpi_disabled) - unflatten_device_tree(); - -+ /* Do after acpi_boot_table_init() so local FADT is available */ -+ hyperv_early_init(); -+ - bootmem_init(); - - kasan_init(); -diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c -index 111111111111..222222222222 100644 ---- a/drivers/clocksource/hyperv_timer.c -+++ b/drivers/clocksource/hyperv_timer.c -@@ -566,3 +566,17 @@ void __init hv_init_clocksource(void) - hv_sched_clock_offset = hv_read_reference_counter(); - hv_setup_sched_clock(read_hv_sched_clock_msr); - } -+ -+/* Initialize everything on ARM64 */ -+static int __init hyperv_timer_init(struct acpi_table_header *table) -+{ -+ if (!hv_is_hyperv_initialized()) -+ return -EINVAL; -+ -+ hv_init_clocksource(); -+ if (hv_stimer_alloc(true)) -+ return -EINVAL; -+ -+ return 0; -+} -+TIMER_ACPI_DECLARE(hyperv, ACPI_SIG_GTDT, hyperv_timer_init); -diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/hv/Kconfig -+++ b/drivers/hv/Kconfig -@@ -14,7 +14,7 @@ config HYPERV - system. 
- - config HYPERV_TIMER -- def_bool HYPERV && X86 -+ def_bool HYPERV - - config HYPERV_UTILS - tristate "Microsoft Hyper-V Utilities driver" --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1668-drivers-hv-dxgkrnl-Add-virtual-compute-device-VMBus-channel-guids.patch b/patch/kernel/archive/wsl2-arm64-6.1/1668-drivers-hv-dxgkrnl-Add-virtual-compute-device-VMBus-channel-guids.patch deleted file mode 100644 index aeca83965755..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1668-drivers-hv-dxgkrnl-Add-virtual-compute-device-VMBus-channel-guids.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 15 Feb 2022 18:11:52 -0800 -Subject: drivers: hv: dxgkrnl: Add virtual compute device VMBus channel guids - -Add VMBus channel guids, which are used by hyper-v virtual compute -device driver. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - include/linux/hyperv.h | 16 ++++++++++ - 1 file changed, 16 insertions(+) - -diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h -index 111111111111..222222222222 100644 ---- a/include/linux/hyperv.h -+++ b/include/linux/hyperv.h -@@ -1478,6 +1478,22 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); - .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ - 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) - -+/* -+ * GPU paravirtualization global DXGK channel -+ * {DDE9CBC0-5060-4436-9448-EA1254A5D177} -+ */ -+#define HV_GPUP_DXGK_GLOBAL_GUID \ -+ .guid = GUID_INIT(0xdde9cbc0, 0x5060, 0x4436, 0x94, 0x48, \ -+ 0xea, 0x12, 0x54, 0xa5, 0xd1, 0x77) -+ -+/* -+ * GPU paravirtualization per virtual GPU DXGK channel -+ * {6E382D18-3336-4F4B-ACC4-2B7703D4DF4A} -+ */ -+#define HV_GPUP_DXGK_VGPU_GUID \ -+ .guid = GUID_INIT(0x6e382d18, 0x3336, 0x4f4b, 0xac, 0xc4, \ -+ 0x2b, 0x77, 0x3, 0xd4, 0xdf, 0x4a) -+ - /* - * Synthetic FC GUID - * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} 
--- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1669-drivers-hv-dxgkrnl-Driver-initialization-and-loading.patch b/patch/kernel/archive/wsl2-arm64-6.1/1669-drivers-hv-dxgkrnl-Driver-initialization-and-loading.patch deleted file mode 100644 index 6af1fd6c0d14..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1669-drivers-hv-dxgkrnl-Driver-initialization-and-loading.patch +++ /dev/null @@ -1,966 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 24 Mar 2021 11:10:28 -0700 -Subject: drivers: hv: dxgkrnl: Driver initialization and loading - -- Create skeleton and add basic functionality for the Hyper-V -compute device driver (dxgkrnl). - -- Register for PCI and VMBus driver notifications and handle -initialization of VMBus channels. - -- Connect the dxgkrnl module to the drivers/hv/ Makefile and Kconfig - -- Create a MAINTAINERS entry - -A VMBus channel is a communication interface between the Hyper-V guest -and the host. The are two type of VMBus channels, used in the driver: - - the global channel - - per virtual compute device channel - -A PCI device is created for each virtual compute device, projected -by the host. The device vendor is PCI_VENDOR_ID_MICROSOFT and device -id is PCI_DEVICE_ID_VIRTUAL_RENDER. dxg_pci_probe_device handles -arrival of such devices. The PCI config space of the virtual compute -device has luid of the corresponding virtual compute device VM -bus channel. This is how the compute device adapter objects are -linked to VMBus channels. - -VMBus interface version is exchanged by reading/writing the PCI config -space of the virtual compute device. - -The IO space is used to handle CPU accessible compute device -allocations. Hyper-V allocates IO space for the global VMBus channel. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - MAINTAINERS | 7 + - drivers/hv/Kconfig | 2 + - drivers/hv/Makefile | 1 + - drivers/hv/dxgkrnl/Kconfig | 26 + - drivers/hv/dxgkrnl/Makefile | 5 + - drivers/hv/dxgkrnl/dxgkrnl.h | 155 +++ - drivers/hv/dxgkrnl/dxgmodule.c | 506 ++++++++++ - drivers/hv/dxgkrnl/dxgvmbus.c | 92 ++ - drivers/hv/dxgkrnl/dxgvmbus.h | 19 + - include/uapi/misc/d3dkmthk.h | 27 + - 10 files changed, 840 insertions(+) - -diff --git a/MAINTAINERS b/MAINTAINERS -index 111111111111..222222222222 100644 ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -9551,6 +9551,13 @@ F: Documentation/devicetree/bindings/mtd/ti,am654-hbmc.yaml - F: drivers/mtd/hyperbus/ - F: include/linux/mtd/hyperbus.h - -+Hyper-V vGPU DRIVER -+M: Iouri Tarassov -+L: linux-hyperv@vger.kernel.org -+S: Supported -+F: drivers/hv/dxgkrnl/ -+F: include/uapi/misc/d3dkmthk.h -+ - HYPERVISOR VIRTUAL CONSOLE DRIVER - L: linuxppc-dev@lists.ozlabs.org - S: Odd Fixes -diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/hv/Kconfig -+++ b/drivers/hv/Kconfig -@@ -30,4 +30,6 @@ config HYPERV_BALLOON - help - Select this option to enable Hyper-V Balloon driver. 
- -+source "drivers/hv/dxgkrnl/Kconfig" -+ - endmenu -diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/hv/Makefile -+++ b/drivers/hv/Makefile -@@ -2,6 +2,7 @@ - obj-$(CONFIG_HYPERV) += hv_vmbus.o - obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o - obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o -+obj-$(CONFIG_DXGKRNL) += dxgkrnl/ - - CFLAGS_hv_trace.o = -I$(src) - CFLAGS_hv_balloon.o = -I$(src) -diff --git a/drivers/hv/dxgkrnl/Kconfig b/drivers/hv/dxgkrnl/Kconfig -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/Kconfig -@@ -0,0 +1,26 @@ -+# SPDX-License-Identifier: GPL-2.0 -+# Configuration for the hyper-v virtual compute driver (dxgkrnl) -+# -+ -+config DXGKRNL -+ tristate "Microsoft Paravirtualized GPU support" -+ depends on HYPERV -+ depends on 64BIT || COMPILE_TEST -+ help -+ This driver supports paravirtualized virtual compute devices, exposed -+ by Microsoft Hyper-V when Linux is running inside of a virtual machine -+ hosted by Windows. The virtual machines needs to be configured to use -+ host compute adapters. The driver name is dxgkrnl. -+ -+ An example of such virtual machine is a Windows Subsystem for -+ Linux container. When such container is instantiated, the Windows host -+ assigns compatible host GPU adapters to the container. The corresponding -+ virtual GPU devices appear on the PCI bus in the container. These -+ devices are enumerated and accessed by this driver. -+ -+ Communications with the driver are done by using the Microsoft libdxcore -+ library, which translates the D3DKMT interface -+ -+ to the driver IOCTLs. The virtual GPU devices are paravirtualized, -+ which means that access to the hardware is done in the host. The driver -+ communicates with the host using Hyper-V VM bus communication channels. 
-diff --git a/drivers/hv/dxgkrnl/Makefile b/drivers/hv/dxgkrnl/Makefile -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/Makefile -@@ -0,0 +1,5 @@ -+# SPDX-License-Identifier: GPL-2.0 -+# Makefile for the hyper-v compute device driver (dxgkrnl). -+ -+obj-$(CONFIG_DXGKRNL) += dxgkrnl.o -+dxgkrnl-y := dxgmodule.o dxgvmbus.o -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -0,0 +1,155 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Headers for internal objects -+ * -+ */ -+ -+#ifndef _DXGKRNL_H -+#define _DXGKRNL_H -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct dxgadapter; -+ -+/* -+ * Driver private data. -+ * A single /dev/dxg device is created per virtual machine. -+ */ -+struct dxgdriver{ -+ struct dxgglobal *dxgglobal; -+ struct device *dxgdev; -+ struct pci_driver pci_drv; -+ struct hv_driver vmbus_drv; -+}; -+extern struct dxgdriver dxgdrv; -+ -+#define DXGDEV dxgdrv.dxgdev -+ -+struct dxgvmbuschannel { -+ struct vmbus_channel *channel; -+ struct hv_device *hdev; -+ spinlock_t packet_list_mutex; -+ struct list_head packet_list_head; -+ struct kmem_cache *packet_cache; -+ atomic64_t packet_request_id; -+}; -+ -+int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev); -+void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch); -+void dxgvmbuschannel_receive(void *ctx); -+ -+/* -+ * The structure defines an offered vGPU vm bus channel. 
-+ */ -+struct dxgvgpuchannel { -+ struct list_head vgpu_ch_list_entry; -+ struct winluid adapter_luid; -+ struct hv_device *hdev; -+}; -+ -+struct dxgglobal { -+ struct dxgdriver *drvdata; -+ struct dxgvmbuschannel channel; -+ struct hv_device *hdev; -+ u32 num_adapters; -+ u32 vmbus_ver; /* Interface version */ -+ struct resource *mem; -+ u64 mmiospace_base; -+ u64 mmiospace_size; -+ struct miscdevice dxgdevice; -+ struct mutex device_mutex; -+ -+ /* -+ * List of the vGPU VM bus channels (dxgvgpuchannel) -+ * Protected by device_mutex -+ */ -+ struct list_head vgpu_ch_list_head; -+ -+ /* protects acces to the global VM bus channel */ -+ struct rw_semaphore channel_lock; -+ -+ bool global_channel_initialized; -+ bool async_msg_enabled; -+ bool misc_registered; -+ bool pci_registered; -+ bool vmbus_registered; -+}; -+ -+static inline struct dxgglobal *dxggbl(void) -+{ -+ return dxgdrv.dxgglobal; -+} -+ -+struct dxgprocess { -+ /* Placeholder */ -+}; -+ -+/* -+ * The convention is that VNBus instance id is a GUID, but the host sets -+ * the lower part of the value to the host adapter LUID. The function -+ * provides the necessary conversion. -+ */ -+static inline void guid_to_luid(guid_t *guid, struct winluid *luid) -+{ -+ *luid = *(struct winluid *)&guid->b[0]; -+} -+ -+/* -+ * VM bus interface -+ * -+ */ -+ -+/* -+ * The interface version is used to ensure that the host and the guest use the -+ * same VM bus protocol. It needs to be incremented every time the VM bus -+ * interface changes. DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION is -+ * incremented each time the earlier versions of the interface are no longer -+ * compatible with the current version. -+ */ -+#define DXGK_VMBUS_INTERFACE_VERSION_OLD 27 -+#define DXGK_VMBUS_INTERFACE_VERSION 40 -+#define DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION 16 -+ -+#ifdef DEBUG -+ -+void dxgk_validate_ioctls(void); -+ -+#define DXG_TRACE(fmt, ...) 
do { \ -+ trace_printk(dev_fmt(fmt) "\n", ##__VA_ARGS__); \ -+} while (0) -+ -+#define DXG_ERR(fmt, ...) do { \ -+ dev_err(DXGDEV, fmt, ##__VA_ARGS__); \ -+ trace_printk("*** dxgkerror *** " dev_fmt(fmt) "\n", ##__VA_ARGS__); \ -+} while (0) -+ -+#else -+ -+#define DXG_TRACE(...) -+#define DXG_ERR(fmt, ...) do { \ -+ dev_err(DXGDEV, fmt, ##__VA_ARGS__); \ -+} while (0) -+ -+#endif /* DEBUG */ -+ -+#endif -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -0,0 +1,506 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Interface with Linux kernel, PCI driver and the VM bus driver -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include "dxgkrnl.h" -+ -+#define PCI_VENDOR_ID_MICROSOFT 0x1414 -+#define PCI_DEVICE_ID_VIRTUAL_RENDER 0x008E -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -+ -+/* -+ * Interface from dxgglobal -+ */ -+ -+struct vmbus_channel *dxgglobal_get_vmbus(void) -+{ -+ return dxggbl()->channel.channel; -+} -+ -+struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void) -+{ -+ return &dxggbl()->channel; -+} -+ -+int dxgglobal_acquire_channel_lock(void) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ down_read(&dxgglobal->channel_lock); -+ if (dxgglobal->channel.channel == NULL) { -+ DXG_ERR("Failed to acquire global channel lock"); -+ return -ENODEV; -+ } else { -+ return 0; -+ } -+} -+ -+void dxgglobal_release_channel_lock(void) -+{ -+ up_read(&dxggbl()->channel_lock); -+} -+ -+const struct file_operations dxgk_fops = { -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Interface with the PCI driver -+ */ -+ -+/* -+ * Part of the PCI config space of the compute device is used for -+ * configuration data. Reading/writing of the PCI config space is forwarded -+ * to the host. 
-+ * -+ * Below are offsets in the PCI config spaces for various configuration values. -+ */ -+ -+/* Compute device VM bus channel instance ID */ -+#define DXGK_VMBUS_CHANNEL_ID_OFFSET 192 -+ -+/* DXGK_VMBUS_INTERFACE_VERSION (u32) */ -+#define DXGK_VMBUS_VERSION_OFFSET (DXGK_VMBUS_CHANNEL_ID_OFFSET + \ -+ sizeof(guid_t)) -+ -+/* Luid of the virtual GPU on the host (struct winluid) */ -+#define DXGK_VMBUS_VGPU_LUID_OFFSET (DXGK_VMBUS_VERSION_OFFSET + \ -+ sizeof(u32)) -+ -+/* The guest writes its capabilities to this address */ -+#define DXGK_VMBUS_GUESTCAPS_OFFSET (DXGK_VMBUS_VERSION_OFFSET + \ -+ sizeof(u32)) -+ -+/* Capabilities of the guest driver, reported to the host */ -+struct dxgk_vmbus_guestcaps { -+ union { -+ struct { -+ u32 wsl2 : 1; -+ u32 reserved : 31; -+ }; -+ u32 guest_caps; -+ }; -+}; -+ -+/* -+ * A helper function to read PCI config space. -+ */ -+static int dxg_pci_read_dwords(struct pci_dev *dev, int offset, int size, -+ void *val) -+{ -+ int off = offset; -+ int ret; -+ int i; -+ -+ /* Make sure the offset and size are 32 bit aligned */ -+ if (offset & 3 || size & 3) -+ return -EINVAL; -+ -+ for (i = 0; i < size / sizeof(int); i++) { -+ ret = pci_read_config_dword(dev, off, &((int *)val)[i]); -+ if (ret) { -+ DXG_ERR("Failed to read PCI config: %d", off); -+ return ret; -+ } -+ off += sizeof(int); -+ } -+ return 0; -+} -+ -+static int dxg_pci_probe_device(struct pci_dev *dev, -+ const struct pci_device_id *id) -+{ -+ int ret; -+ guid_t guid; -+ u32 vmbus_interface_ver = DXGK_VMBUS_INTERFACE_VERSION; -+ struct winluid vgpu_luid = {}; -+ struct dxgk_vmbus_guestcaps guest_caps = {.wsl2 = 1}; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ mutex_lock(&dxgglobal->device_mutex); -+ -+ if (dxgglobal->vmbus_ver == 0) { -+ /* Report capabilities to the host */ -+ -+ ret = pci_write_config_dword(dev, DXGK_VMBUS_GUESTCAPS_OFFSET, -+ guest_caps.guest_caps); -+ if (ret) -+ goto cleanup; -+ -+ /* Negotiate the VM bus version */ -+ -+ ret = 
pci_read_config_dword(dev, DXGK_VMBUS_VERSION_OFFSET, -+ &vmbus_interface_ver); -+ if (ret == 0 && vmbus_interface_ver != 0) -+ dxgglobal->vmbus_ver = vmbus_interface_ver; -+ else -+ dxgglobal->vmbus_ver = DXGK_VMBUS_INTERFACE_VERSION_OLD; -+ -+ if (dxgglobal->vmbus_ver < DXGK_VMBUS_INTERFACE_VERSION) -+ goto read_channel_id; -+ -+ ret = pci_write_config_dword(dev, DXGK_VMBUS_VERSION_OFFSET, -+ DXGK_VMBUS_INTERFACE_VERSION); -+ if (ret) -+ goto cleanup; -+ -+ if (dxgglobal->vmbus_ver > DXGK_VMBUS_INTERFACE_VERSION) -+ dxgglobal->vmbus_ver = DXGK_VMBUS_INTERFACE_VERSION; -+ } -+ -+read_channel_id: -+ -+ /* Get the VM bus channel ID for the virtual GPU */ -+ ret = dxg_pci_read_dwords(dev, DXGK_VMBUS_CHANNEL_ID_OFFSET, -+ sizeof(guid), (int *)&guid); -+ if (ret) -+ goto cleanup; -+ -+ if (dxgglobal->vmbus_ver >= DXGK_VMBUS_INTERFACE_VERSION) { -+ ret = dxg_pci_read_dwords(dev, DXGK_VMBUS_VGPU_LUID_OFFSET, -+ sizeof(vgpu_luid), &vgpu_luid); -+ if (ret) -+ goto cleanup; -+ } -+ -+ DXG_TRACE("Adapter channel: %pUb", &guid); -+ DXG_TRACE("Vmbus interface version: %d", dxgglobal->vmbus_ver); -+ DXG_TRACE("Host luid: %x-%x", vgpu_luid.b, vgpu_luid.a); -+ -+cleanup: -+ -+ mutex_unlock(&dxgglobal->device_mutex); -+ -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+static void dxg_pci_remove_device(struct pci_dev *dev) -+{ -+ /* Placeholder */ -+} -+ -+static struct pci_device_id dxg_pci_id_table[] = { -+ { -+ .vendor = PCI_VENDOR_ID_MICROSOFT, -+ .device = PCI_DEVICE_ID_VIRTUAL_RENDER, -+ .subvendor = PCI_ANY_ID, -+ .subdevice = PCI_ANY_ID -+ }, -+ { 0 } -+}; -+ -+/* -+ * Interface with the VM bus driver -+ */ -+ -+static int dxgglobal_getiospace(struct dxgglobal *dxgglobal) -+{ -+ /* Get mmio space for the global channel */ -+ struct hv_device *hdev = dxgglobal->hdev; -+ struct vmbus_channel *channel = hdev->channel; -+ resource_size_t pot_start = 0; -+ resource_size_t pot_end = -1; -+ int ret; -+ -+ dxgglobal->mmiospace_size = 
channel->offermsg.offer.mmio_megabytes; -+ if (dxgglobal->mmiospace_size == 0) { -+ DXG_TRACE("Zero mmio space is offered"); -+ return -ENOMEM; -+ } -+ dxgglobal->mmiospace_size <<= 20; -+ DXG_TRACE("mmio offered: %llx", dxgglobal->mmiospace_size); -+ -+ ret = vmbus_allocate_mmio(&dxgglobal->mem, hdev, pot_start, pot_end, -+ dxgglobal->mmiospace_size, 0x10000, false); -+ if (ret) { -+ DXG_ERR("Unable to allocate mmio memory: %d", ret); -+ return ret; -+ } -+ dxgglobal->mmiospace_size = dxgglobal->mem->end - -+ dxgglobal->mem->start + 1; -+ dxgglobal->mmiospace_base = dxgglobal->mem->start; -+ DXG_TRACE("mmio allocated %llx %llx %llx %llx", -+ dxgglobal->mmiospace_base, dxgglobal->mmiospace_size, -+ dxgglobal->mem->start, dxgglobal->mem->end); -+ -+ return 0; -+} -+ -+int dxgglobal_init_global_channel(void) -+{ -+ int ret = 0; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = dxgvmbuschannel_init(&dxgglobal->channel, dxgglobal->hdev); -+ if (ret) { -+ DXG_ERR("dxgvmbuschannel_init failed: %d", ret); -+ goto error; -+ } -+ -+ ret = dxgglobal_getiospace(dxgglobal); -+ if (ret) { -+ DXG_ERR("getiospace failed: %d", ret); -+ goto error; -+ } -+ -+ hv_set_drvdata(dxgglobal->hdev, dxgglobal); -+ -+error: -+ return ret; -+} -+ -+void dxgglobal_destroy_global_channel(void) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ down_write(&dxgglobal->channel_lock); -+ -+ dxgglobal->global_channel_initialized = false; -+ -+ if (dxgglobal->mem) { -+ vmbus_free_mmio(dxgglobal->mmiospace_base, -+ dxgglobal->mmiospace_size); -+ dxgglobal->mem = NULL; -+ } -+ -+ dxgvmbuschannel_destroy(&dxgglobal->channel); -+ -+ if (dxgglobal->hdev) { -+ hv_set_drvdata(dxgglobal->hdev, NULL); -+ dxgglobal->hdev = NULL; -+ } -+ -+ up_write(&dxgglobal->channel_lock); -+} -+ -+static const struct hv_vmbus_device_id dxg_vmbus_id_table[] = { -+ /* Per GPU Device GUID */ -+ { HV_GPUP_DXGK_VGPU_GUID }, -+ /* Global Dxgkgnl channel for the virtual machine */ -+ { HV_GPUP_DXGK_GLOBAL_GUID }, -+ { } 
-+}; -+ -+static int dxg_probe_vmbus(struct hv_device *hdev, -+ const struct hv_vmbus_device_id *dev_id) -+{ -+ int ret = 0; -+ struct winluid luid; -+ struct dxgvgpuchannel *vgpuch; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ mutex_lock(&dxgglobal->device_mutex); -+ -+ if (uuid_le_cmp(hdev->dev_type, dxg_vmbus_id_table[0].guid) == 0) { -+ /* This is a new virtual GPU channel */ -+ guid_to_luid(&hdev->channel->offermsg.offer.if_instance, &luid); -+ DXG_TRACE("vGPU channel: %pUb", -+ &hdev->channel->offermsg.offer.if_instance); -+ vgpuch = kzalloc(sizeof(struct dxgvgpuchannel), GFP_KERNEL); -+ if (vgpuch == NULL) { -+ ret = -ENOMEM; -+ goto error; -+ } -+ vgpuch->adapter_luid = luid; -+ vgpuch->hdev = hdev; -+ list_add_tail(&vgpuch->vgpu_ch_list_entry, -+ &dxgglobal->vgpu_ch_list_head); -+ } else if (uuid_le_cmp(hdev->dev_type, -+ dxg_vmbus_id_table[1].guid) == 0) { -+ /* This is the global Dxgkgnl channel */ -+ DXG_TRACE("Global channel: %pUb", -+ &hdev->channel->offermsg.offer.if_instance); -+ if (dxgglobal->hdev) { -+ /* This device should appear only once */ -+ DXG_ERR("global channel already exists"); -+ ret = -EBADE; -+ goto error; -+ } -+ dxgglobal->hdev = hdev; -+ } else { -+ /* Unknown device type */ -+ DXG_ERR("Unknown VM bus device type"); -+ ret = -ENODEV; -+ } -+ -+error: -+ -+ mutex_unlock(&dxgglobal->device_mutex); -+ -+ return ret; -+} -+ -+static int dxg_remove_vmbus(struct hv_device *hdev) -+{ -+ int ret = 0; -+ struct dxgvgpuchannel *vgpu_channel; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ mutex_lock(&dxgglobal->device_mutex); -+ -+ if (uuid_le_cmp(hdev->dev_type, dxg_vmbus_id_table[0].guid) == 0) { -+ DXG_TRACE("Remove virtual GPU channel"); -+ list_for_each_entry(vgpu_channel, -+ &dxgglobal->vgpu_ch_list_head, -+ vgpu_ch_list_entry) { -+ if (vgpu_channel->hdev == hdev) { -+ list_del(&vgpu_channel->vgpu_ch_list_entry); -+ kfree(vgpu_channel); -+ break; -+ } -+ } -+ } else if (uuid_le_cmp(hdev->dev_type, -+ dxg_vmbus_id_table[1].guid) 
== 0) { -+ DXG_TRACE("Remove global channel device"); -+ dxgglobal_destroy_global_channel(); -+ } else { -+ /* Unknown device type */ -+ DXG_ERR("Unknown device type"); -+ ret = -ENODEV; -+ } -+ -+ mutex_unlock(&dxgglobal->device_mutex); -+ -+ return ret; -+} -+ -+MODULE_DEVICE_TABLE(vmbus, dxg_vmbus_id_table); -+MODULE_DEVICE_TABLE(pci, dxg_pci_id_table); -+ -+/* -+ * Global driver data -+ */ -+ -+struct dxgdriver dxgdrv = { -+ .vmbus_drv.name = KBUILD_MODNAME, -+ .vmbus_drv.id_table = dxg_vmbus_id_table, -+ .vmbus_drv.probe = dxg_probe_vmbus, -+ .vmbus_drv.remove = dxg_remove_vmbus, -+ .vmbus_drv.driver = { -+ .probe_type = PROBE_PREFER_ASYNCHRONOUS, -+ }, -+ .pci_drv.name = KBUILD_MODNAME, -+ .pci_drv.id_table = dxg_pci_id_table, -+ .pci_drv.probe = dxg_pci_probe_device, -+ .pci_drv.remove = dxg_pci_remove_device -+}; -+ -+static struct dxgglobal *dxgglobal_create(void) -+{ -+ struct dxgglobal *dxgglobal; -+ -+ dxgglobal = kzalloc(sizeof(struct dxgglobal), GFP_KERNEL); -+ if (!dxgglobal) -+ return NULL; -+ -+ mutex_init(&dxgglobal->device_mutex); -+ -+ INIT_LIST_HEAD(&dxgglobal->vgpu_ch_list_head); -+ -+ init_rwsem(&dxgglobal->channel_lock); -+ -+ return dxgglobal; -+} -+ -+static void dxgglobal_destroy(struct dxgglobal *dxgglobal) -+{ -+ if (dxgglobal) { -+ mutex_lock(&dxgglobal->device_mutex); -+ dxgglobal_destroy_global_channel(); -+ mutex_unlock(&dxgglobal->device_mutex); -+ -+ if (dxgglobal->vmbus_registered) -+ vmbus_driver_unregister(&dxgdrv.vmbus_drv); -+ -+ dxgglobal_destroy_global_channel(); -+ -+ if (dxgglobal->pci_registered) -+ pci_unregister_driver(&dxgdrv.pci_drv); -+ -+ if (dxgglobal->misc_registered) -+ misc_deregister(&dxgglobal->dxgdevice); -+ -+ dxgglobal->drvdata->dxgdev = NULL; -+ -+ kfree(dxgglobal); -+ dxgglobal = NULL; -+ } -+} -+ -+static int __init dxg_drv_init(void) -+{ -+ int ret; -+ struct dxgglobal *dxgglobal = NULL; -+ -+ dxgglobal = dxgglobal_create(); -+ if (dxgglobal == NULL) { -+ pr_err("dxgglobal_init failed"); -+ ret = 
-ENOMEM; -+ goto error; -+ } -+ dxgglobal->drvdata = &dxgdrv; -+ -+ dxgglobal->dxgdevice.minor = MISC_DYNAMIC_MINOR; -+ dxgglobal->dxgdevice.name = "dxg"; -+ dxgglobal->dxgdevice.fops = &dxgk_fops; -+ dxgglobal->dxgdevice.mode = 0666; -+ ret = misc_register(&dxgglobal->dxgdevice); -+ if (ret) { -+ pr_err("misc_register failed: %d", ret); -+ goto error; -+ } -+ dxgglobal->misc_registered = true; -+ dxgdrv.dxgdev = dxgglobal->dxgdevice.this_device; -+ dxgdrv.dxgglobal = dxgglobal; -+ -+ ret = vmbus_driver_register(&dxgdrv.vmbus_drv); -+ if (ret) { -+ DXG_ERR("vmbus_driver_register failed: %d", ret); -+ goto error; -+ } -+ dxgglobal->vmbus_registered = true; -+ -+ ret = pci_register_driver(&dxgdrv.pci_drv); -+ if (ret) { -+ DXG_ERR("pci_driver_register failed: %d", ret); -+ goto error; -+ } -+ dxgglobal->pci_registered = true; -+ -+ return 0; -+ -+error: -+ /* This function does the cleanup */ -+ dxgglobal_destroy(dxgglobal); -+ dxgdrv.dxgglobal = NULL; -+ -+ return ret; -+} -+ -+static void __exit dxg_drv_exit(void) -+{ -+ dxgglobal_destroy(dxgdrv.dxgglobal); -+} -+ -+module_init(dxg_drv_init); -+module_exit(dxg_drv_exit); -+ -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("Microsoft Dxgkrnl virtual compute device Driver"); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -0,0 +1,92 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * VM bus interface implementation -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "dxgkrnl.h" -+#include "dxgvmbus.h" -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -+ -+#define RING_BUFSIZE (256 * 1024) -+ -+/* -+ * The structure is used to track VM bus packets, waiting for completion. 
-+ */ -+struct dxgvmbuspacket { -+ struct list_head packet_list_entry; -+ u64 request_id; -+ struct completion wait; -+ void *buffer; -+ u32 buffer_length; -+ int status; -+ bool completed; -+}; -+ -+int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev) -+{ -+ int ret; -+ -+ ch->hdev = hdev; -+ spin_lock_init(&ch->packet_list_mutex); -+ INIT_LIST_HEAD(&ch->packet_list_head); -+ atomic64_set(&ch->packet_request_id, 0); -+ -+ ch->packet_cache = kmem_cache_create("DXGK packet cache", -+ sizeof(struct dxgvmbuspacket), 0, -+ 0, NULL); -+ if (ch->packet_cache == NULL) { -+ DXG_ERR("packet_cache alloc failed"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0) -+ hdev->channel->max_pkt_size = DXG_MAX_VM_BUS_PACKET_SIZE; -+#endif -+ ret = vmbus_open(hdev->channel, RING_BUFSIZE, RING_BUFSIZE, -+ NULL, 0, dxgvmbuschannel_receive, ch); -+ if (ret) { -+ DXG_ERR("vmbus_open failed: %d", ret); -+ goto cleanup; -+ } -+ -+ ch->channel = hdev->channel; -+ -+cleanup: -+ -+ return ret; -+} -+ -+void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch) -+{ -+ kmem_cache_destroy(ch->packet_cache); -+ ch->packet_cache = NULL; -+ -+ if (ch->channel) { -+ vmbus_close(ch->channel); -+ ch->channel = NULL; -+ } -+} -+ -+/* Receive callback for messages from the host */ -+void dxgvmbuschannel_receive(void *ctx) -+{ -+} -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -0,0 +1,19 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. 
-+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * VM bus interface with the host definitions -+ * -+ */ -+ -+#ifndef _DXGVMBUS_H -+#define _DXGVMBUS_H -+ -+#define DXG_MAX_VM_BUS_PACKET_SIZE (1024 * 128) -+ -+#endif /* _DXGVMBUS_H */ -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/include/uapi/misc/d3dkmthk.h -@@ -0,0 +1,27 @@ -+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -+ -+/* -+ * Copyright (c) 2019, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * User mode WDDM interface definitions -+ * -+ */ -+ -+#ifndef _D3DKMTHK_H -+#define _D3DKMTHK_H -+ -+/* -+ * Matches the Windows LUID definition. -+ * LUID is a locally unique identifier (similar to GUID, but not global), -+ * which is guaranteed to be unique intil the computer is rebooted. -+ */ -+struct winluid { -+ __u32 a; -+ __u32 b; -+}; -+ -+#endif /* _D3DKMTHK_H */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1670-drivers-hv-dxgkrnl-Add-VMBus-message-support-initialize-VMBus-channels.patch b/patch/kernel/archive/wsl2-arm64-6.1/1670-drivers-hv-dxgkrnl-Add-VMBus-message-support-initialize-VMBus-channels.patch deleted file mode 100644 index 88a353371e27..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1670-drivers-hv-dxgkrnl-Add-VMBus-message-support-initialize-VMBus-channels.patch +++ /dev/null @@ -1,660 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 15 Feb 2022 18:53:07 -0800 -Subject: drivers: hv: dxgkrnl: Add VMBus message support, initialize VMBus - channels. - -Implement support for sending/receiving VMBus messages between -the host and the guest. - -Initialize the VMBus channels and notify the host about IO space -settings of the VMBus global channel. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 14 + - drivers/hv/dxgkrnl/dxgmodule.c | 9 +- - drivers/hv/dxgkrnl/dxgvmbus.c | 318 ++++++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 67 ++ - drivers/hv/dxgkrnl/ioctl.c | 24 + - drivers/hv/dxgkrnl/misc.h | 72 +++ - include/uapi/misc/d3dkmthk.h | 34 + - 7 files changed, 536 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -28,6 +28,8 @@ - #include - #include - #include -+#include "misc.h" -+#include - - struct dxgadapter; - -@@ -100,6 +102,13 @@ static inline struct dxgglobal *dxggbl(void) - return dxgdrv.dxgglobal; - } - -+int dxgglobal_init_global_channel(void); -+void dxgglobal_destroy_global_channel(void); -+struct vmbus_channel *dxgglobal_get_vmbus(void); -+struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void); -+int dxgglobal_acquire_channel_lock(void); -+void dxgglobal_release_channel_lock(void); -+ - struct dxgprocess { - /* Placeholder */ - }; -@@ -130,6 +139,11 @@ static inline void guid_to_luid(guid_t *guid, struct winluid *luid) - #define DXGK_VMBUS_INTERFACE_VERSION 40 - #define DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION 16 - -+void dxgvmb_initialize(void); -+int dxgvmb_send_set_iospace_region(u64 start, u64 len); -+ -+int ntstatus2int(struct ntstatus status); -+ - #ifdef DEBUG - - void dxgk_validate_ioctls(void); -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -260,6 +260,13 @@ int dxgglobal_init_global_channel(void) - goto error; - } - -+ ret = dxgvmb_send_set_iospace_region(dxgglobal->mmiospace_base, -+ dxgglobal->mmiospace_size); -+ if (ret < 0) { -+ DXG_ERR("send_set_iospace_region failed"); -+ goto 
error; -+ } -+ - hv_set_drvdata(dxgglobal->hdev, dxgglobal); - - error: -@@ -429,8 +436,6 @@ static void dxgglobal_destroy(struct dxgglobal *dxgglobal) - if (dxgglobal->vmbus_registered) - vmbus_driver_unregister(&dxgdrv.vmbus_drv); - -- dxgglobal_destroy_global_channel(); -- - if (dxgglobal->pci_registered) - pci_unregister_driver(&dxgdrv.pci_drv); - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -40,6 +40,121 @@ struct dxgvmbuspacket { - bool completed; - }; - -+struct dxgvmb_ext_header { -+ /* Offset from the start of the message to DXGKVMB_COMMAND_BASE */ -+ u32 command_offset; -+ u32 reserved; -+ struct winluid vgpu_luid; -+}; -+ -+#define VMBUSMESSAGEONSTACK 64 -+ -+struct dxgvmbusmsg { -+/* Points to the allocated buffer */ -+ struct dxgvmb_ext_header *hdr; -+/* Points to dxgkvmb_command_vm_to_host or dxgkvmb_command_vgpu_to_host */ -+ void *msg; -+/* The vm bus channel, used to pass the message to the host */ -+ struct dxgvmbuschannel *channel; -+/* Message size in bytes including the header and the payload */ -+ u32 size; -+/* Buffer used for small messages */ -+ char msg_on_stack[VMBUSMESSAGEONSTACK]; -+}; -+ -+struct dxgvmbusmsgres { -+/* Points to the allocated buffer */ -+ struct dxgvmb_ext_header *hdr; -+/* Points to dxgkvmb_command_vm_to_host or dxgkvmb_command_vgpu_to_host */ -+ void *msg; -+/* The vm bus channel, used to pass the message to the host */ -+ struct dxgvmbuschannel *channel; -+/* Message size in bytes including the header, the payload and the result */ -+ u32 size; -+/* Result buffer size in bytes */ -+ u32 res_size; -+/* Points to the result within the allocated buffer */ -+ void *res; -+}; -+ -+static int init_message(struct dxgvmbusmsg *msg, -+ struct dxgprocess *process, u32 size) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ bool use_ext_header = dxgglobal->vmbus_ver >= -+ 
DXGK_VMBUS_INTERFACE_VERSION; -+ -+ if (use_ext_header) -+ size += sizeof(struct dxgvmb_ext_header); -+ msg->size = size; -+ if (size <= VMBUSMESSAGEONSTACK) { -+ msg->hdr = (void *)msg->msg_on_stack; -+ memset(msg->hdr, 0, size); -+ } else { -+ msg->hdr = vzalloc(size); -+ if (msg->hdr == NULL) -+ return -ENOMEM; -+ } -+ if (use_ext_header) { -+ msg->msg = (char *)&msg->hdr[1]; -+ msg->hdr->command_offset = sizeof(msg->hdr[0]); -+ } else { -+ msg->msg = (char *)msg->hdr; -+ } -+ msg->channel = &dxgglobal->channel; -+ return 0; -+} -+ -+static void free_message(struct dxgvmbusmsg *msg, struct dxgprocess *process) -+{ -+ if (msg->hdr && (char *)msg->hdr != msg->msg_on_stack) -+ vfree(msg->hdr); -+} -+ -+/* -+ * Helper functions -+ */ -+ -+int ntstatus2int(struct ntstatus status) -+{ -+ if (NT_SUCCESS(status)) -+ return (int)status.v; -+ switch (status.v) { -+ case STATUS_OBJECT_NAME_COLLISION: -+ return -EEXIST; -+ case STATUS_NO_MEMORY: -+ return -ENOMEM; -+ case STATUS_INVALID_PARAMETER: -+ return -EINVAL; -+ case STATUS_OBJECT_NAME_INVALID: -+ case STATUS_OBJECT_NAME_NOT_FOUND: -+ return -ENOENT; -+ case STATUS_TIMEOUT: -+ return -EAGAIN; -+ case STATUS_BUFFER_TOO_SMALL: -+ return -EOVERFLOW; -+ case STATUS_DEVICE_REMOVED: -+ return -ENODEV; -+ case STATUS_ACCESS_DENIED: -+ return -EACCES; -+ case STATUS_NOT_SUPPORTED: -+ return -EPERM; -+ case STATUS_ILLEGAL_INSTRUCTION: -+ return -EOPNOTSUPP; -+ case STATUS_INVALID_HANDLE: -+ return -EBADF; -+ case STATUS_GRAPHICS_ALLOCATION_BUSY: -+ return -EINPROGRESS; -+ case STATUS_OBJECT_TYPE_MISMATCH: -+ return -EPROTOTYPE; -+ case STATUS_NOT_IMPLEMENTED: -+ return -EPERM; -+ default: -+ return -EINVAL; -+ } -+} -+ - int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev) - { - int ret; -@@ -86,7 +201,210 @@ void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch) - } - } - -+static void command_vm_to_host_init1(struct dxgkvmb_command_vm_to_host *command, -+ enum dxgkvmb_commandtype_global type) -+{ 
-+ command->command_type = type; -+ command->process.v = 0; -+ command->command_id = 0; -+ command->channel_type = DXGKVMB_VM_TO_HOST; -+} -+ -+static void process_inband_packet(struct dxgvmbuschannel *channel, -+ struct vmpacket_descriptor *desc) -+{ -+ u32 packet_length = hv_pkt_datalen(desc); -+ struct dxgkvmb_command_host_to_vm *packet; -+ -+ if (packet_length < sizeof(struct dxgkvmb_command_host_to_vm)) { -+ DXG_ERR("Invalid global packet"); -+ } else { -+ packet = hv_pkt_data(desc); -+ DXG_TRACE("global packet %d", -+ packet->command_type); -+ switch (packet->command_type) { -+ case DXGK_VMBCOMMAND_SIGNALGUESTEVENT: -+ case DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE: -+ break; -+ case DXGK_VMBCOMMAND_SENDWNFNOTIFICATION: -+ break; -+ default: -+ DXG_ERR("unexpected host message %d", -+ packet->command_type); -+ } -+ } -+} -+ -+static void process_completion_packet(struct dxgvmbuschannel *channel, -+ struct vmpacket_descriptor *desc) -+{ -+ struct dxgvmbuspacket *packet = NULL; -+ struct dxgvmbuspacket *entry; -+ u32 packet_length = hv_pkt_datalen(desc); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&channel->packet_list_mutex, flags); -+ list_for_each_entry(entry, &channel->packet_list_head, -+ packet_list_entry) { -+ if (desc->trans_id == entry->request_id) { -+ packet = entry; -+ list_del(&packet->packet_list_entry); -+ packet->completed = true; -+ break; -+ } -+ } -+ spin_unlock_irqrestore(&channel->packet_list_mutex, flags); -+ if (packet) { -+ if (packet->buffer_length) { -+ if (packet_length < packet->buffer_length) { -+ DXG_TRACE("invalid size %d Expected:%d", -+ packet_length, -+ packet->buffer_length); -+ packet->status = -EOVERFLOW; -+ } else { -+ memcpy(packet->buffer, hv_pkt_data(desc), -+ packet->buffer_length); -+ } -+ } -+ complete(&packet->wait); -+ } else { -+ DXG_ERR("did not find packet to complete"); -+ } -+} -+ - /* Receive callback for messages from the host */ - void dxgvmbuschannel_receive(void *ctx) - { -+ struct dxgvmbuschannel 
*channel = ctx; -+ struct vmpacket_descriptor *desc; -+ u32 packet_length = 0; -+ -+ foreach_vmbus_pkt(desc, channel->channel) { -+ packet_length = hv_pkt_datalen(desc); -+ DXG_TRACE("next packet (id, size, type): %llu %d %d", -+ desc->trans_id, packet_length, desc->type); -+ if (desc->type == VM_PKT_COMP) { -+ process_completion_packet(channel, desc); -+ } else { -+ if (desc->type != VM_PKT_DATA_INBAND) -+ DXG_ERR("unexpected packet type"); -+ else -+ process_inband_packet(channel, desc); -+ } -+ } -+} -+ -+int dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel, -+ void *command, -+ u32 cmd_size, -+ void *result, -+ u32 result_size) -+{ -+ int ret; -+ struct dxgvmbuspacket *packet = NULL; -+ -+ if (cmd_size > DXG_MAX_VM_BUS_PACKET_SIZE || -+ result_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("%s invalid data size", __func__); -+ return -EINVAL; -+ } -+ -+ packet = kmem_cache_alloc(channel->packet_cache, 0); -+ if (packet == NULL) { -+ DXG_ERR("kmem_cache_alloc failed"); -+ return -ENOMEM; -+ } -+ -+ packet->request_id = atomic64_inc_return(&channel->packet_request_id); -+ init_completion(&packet->wait); -+ packet->buffer = result; -+ packet->buffer_length = result_size; -+ packet->status = 0; -+ packet->completed = false; -+ spin_lock_irq(&channel->packet_list_mutex); -+ list_add_tail(&packet->packet_list_entry, &channel->packet_list_head); -+ spin_unlock_irq(&channel->packet_list_mutex); -+ -+ ret = vmbus_sendpacket(channel->channel, command, cmd_size, -+ packet->request_id, VM_PKT_DATA_INBAND, -+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); -+ if (ret) { -+ DXG_ERR("vmbus_sendpacket failed: %x", ret); -+ spin_lock_irq(&channel->packet_list_mutex); -+ list_del(&packet->packet_list_entry); -+ spin_unlock_irq(&channel->packet_list_mutex); -+ goto cleanup; -+ } -+ -+ DXG_TRACE("waiting completion: %llu", packet->request_id); -+ ret = wait_for_completion_killable(&packet->wait); -+ if (ret) { -+ DXG_ERR("wait_for_completion failed: %x", ret); -+ 
spin_lock_irq(&channel->packet_list_mutex); -+ if (!packet->completed) -+ list_del(&packet->packet_list_entry); -+ spin_unlock_irq(&channel->packet_list_mutex); -+ goto cleanup; -+ } -+ DXG_TRACE("completion done: %llu %x", -+ packet->request_id, packet->status); -+ ret = packet->status; -+ -+cleanup: -+ -+ kmem_cache_free(channel->packet_cache, packet); -+ if (ret < 0) -+ DXG_TRACE("Error: %x", ret); -+ return ret; -+} -+ -+static int -+dxgvmb_send_sync_msg_ntstatus(struct dxgvmbuschannel *channel, -+ void *command, u32 cmd_size) -+{ -+ struct ntstatus status; -+ int ret; -+ -+ ret = dxgvmb_send_sync_msg(channel, command, cmd_size, -+ &status, sizeof(status)); -+ if (ret >= 0) -+ ret = ntstatus2int(status); -+ return ret; -+} -+ -+/* -+ * Global messages to the host -+ */ -+ -+int dxgvmb_send_set_iospace_region(u64 start, u64 len) -+{ -+ int ret; -+ struct dxgkvmb_command_setiospaceregion *command; -+ struct dxgvmbusmsg msg; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, NULL, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ command_vm_to_host_init1(&command->hdr, -+ DXGK_VMBCOMMAND_SETIOSPACEREGION); -+ command->start = start; -+ command->length = len; -+ ret = dxgvmb_send_sync_msg_ntstatus(&dxgglobal->channel, msg.hdr, -+ msg.size); -+ if (ret < 0) -+ DXG_ERR("send_set_iospace_region failed %x", ret); -+ -+ dxgglobal_release_channel_lock(); -+cleanup: -+ free_message(&msg, NULL); -+ if (ret) -+ DXG_TRACE("Error: %d", ret); -+ return ret; - } -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -16,4 +16,71 @@ - - #define DXG_MAX_VM_BUS_PACKET_SIZE (1024 * 128) - -+enum dxgkvmb_commandchanneltype { -+ DXGKVMB_VGPU_TO_HOST, -+ DXGKVMB_VM_TO_HOST, -+ DXGKVMB_HOST_TO_VM -+}; -+ -+/* -+ * -+ 
* Commands, sent to the host via the guest global VM bus channel -+ * DXG_GUEST_GLOBAL_VMBUS -+ * -+ */ -+ -+enum dxgkvmb_commandtype_global { -+ DXGK_VMBCOMMAND_VM_TO_HOST_FIRST = 1000, -+ DXGK_VMBCOMMAND_CREATEPROCESS = DXGK_VMBCOMMAND_VM_TO_HOST_FIRST, -+ DXGK_VMBCOMMAND_DESTROYPROCESS = 1001, -+ DXGK_VMBCOMMAND_OPENSYNCOBJECT = 1002, -+ DXGK_VMBCOMMAND_DESTROYSYNCOBJECT = 1003, -+ DXGK_VMBCOMMAND_CREATENTSHAREDOBJECT = 1004, -+ DXGK_VMBCOMMAND_DESTROYNTSHAREDOBJECT = 1005, -+ DXGK_VMBCOMMAND_SIGNALFENCE = 1006, -+ DXGK_VMBCOMMAND_NOTIFYPROCESSFREEZE = 1007, -+ DXGK_VMBCOMMAND_NOTIFYPROCESSTHAW = 1008, -+ DXGK_VMBCOMMAND_QUERYETWSESSION = 1009, -+ DXGK_VMBCOMMAND_SETIOSPACEREGION = 1010, -+ DXGK_VMBCOMMAND_COMPLETETRANSACTION = 1011, -+ DXGK_VMBCOMMAND_SHAREOBJECTWITHHOST = 1021, -+ DXGK_VMBCOMMAND_INVALID_VM_TO_HOST -+}; -+ -+/* -+ * Commands, sent by the host to the VM -+ */ -+enum dxgkvmb_commandtype_host_to_vm { -+ DXGK_VMBCOMMAND_SIGNALGUESTEVENT, -+ DXGK_VMBCOMMAND_PROPAGATEPRESENTHISTORYTOKEN, -+ DXGK_VMBCOMMAND_SETGUESTDATA, -+ DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE, -+ DXGK_VMBCOMMAND_SENDWNFNOTIFICATION, -+ DXGK_VMBCOMMAND_INVALID_HOST_TO_VM -+}; -+ -+struct dxgkvmb_command_vm_to_host { -+ u64 command_id; -+ struct d3dkmthandle process; -+ enum dxgkvmb_commandchanneltype channel_type; -+ enum dxgkvmb_commandtype_global command_type; -+}; -+ -+struct dxgkvmb_command_host_to_vm { -+ u64 command_id; -+ struct d3dkmthandle process; -+ u32 channel_type : 8; -+ u32 async_msg : 1; -+ u32 reserved : 23; -+ enum dxgkvmb_commandtype_host_to_vm command_type; -+}; -+ -+/* Returns ntstatus */ -+struct dxgkvmb_command_setiospaceregion { -+ struct dxgkvmb_command_vm_to_host hdr; -+ u64 start; -+ u64 length; -+ u32 shared_page_gpadl; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -0,0 +1,24 @@ -+// 
SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Ioctl implementation -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "dxgkrnl.h" -+#include "dxgvmbus.h" -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -0,0 +1,72 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Misc definitions -+ * -+ */ -+ -+#ifndef _MISC_H_ -+#define _MISC_H_ -+ -+extern const struct d3dkmthandle zerohandle; -+ -+/* -+ * Synchronization lock hierarchy. -+ * -+ * The higher enum value, the higher is the lock order. -+ * When a lower lock ois held, the higher lock should not be acquired. -+ * -+ * channel_lock -+ * device_mutex -+ */ -+ -+/* -+ * Some of the Windows return codes, which needs to be translated to Linux -+ * IOCTL return codes. Positive values are success codes and need to be -+ * returned from the driver IOCTLs. libdxcore.so depends on returning -+ * specific return codes. 
-+ */ -+#define STATUS_SUCCESS ((int)(0)) -+#define STATUS_OBJECT_NAME_INVALID ((int)(0xC0000033L)) -+#define STATUS_DEVICE_REMOVED ((int)(0xC00002B6L)) -+#define STATUS_INVALID_HANDLE ((int)(0xC0000008L)) -+#define STATUS_ILLEGAL_INSTRUCTION ((int)(0xC000001DL)) -+#define STATUS_NOT_IMPLEMENTED ((int)(0xC0000002L)) -+#define STATUS_PENDING ((int)(0x00000103L)) -+#define STATUS_ACCESS_DENIED ((int)(0xC0000022L)) -+#define STATUS_BUFFER_TOO_SMALL ((int)(0xC0000023L)) -+#define STATUS_OBJECT_TYPE_MISMATCH ((int)(0xC0000024L)) -+#define STATUS_GRAPHICS_ALLOCATION_BUSY ((int)(0xC01E0102L)) -+#define STATUS_NOT_SUPPORTED ((int)(0xC00000BBL)) -+#define STATUS_TIMEOUT ((int)(0x00000102L)) -+#define STATUS_INVALID_PARAMETER ((int)(0xC000000DL)) -+#define STATUS_NO_MEMORY ((int)(0xC0000017L)) -+#define STATUS_OBJECT_NAME_COLLISION ((int)(0xC0000035L)) -+#define STATUS_OBJECT_NAME_NOT_FOUND ((int)(0xC0000034L)) -+ -+ -+#define NT_SUCCESS(status) (status.v >= 0) -+ -+#ifndef DEBUG -+ -+#define DXGKRNL_ASSERT(exp) -+ -+#else -+ -+#define DXGKRNL_ASSERT(exp) \ -+do { \ -+ if (!(exp)) { \ -+ dump_stack(); \ -+ BUG_ON(true); \ -+ } \ -+} while (0) -+ -+#endif /* DEBUG */ -+ -+#endif /* _MISC_H_ */ -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -14,6 +14,40 @@ - #ifndef _D3DKMTHK_H - #define _D3DKMTHK_H - -+/* -+ * This structure matches the definition of D3DKMTHANDLE in Windows. -+ * The handle is opaque in user mode. It is used by user mode applications to -+ * represent kernel mode objects, created by dxgkrnl. -+ */ -+struct d3dkmthandle { -+ union { -+ struct { -+ __u32 instance : 6; -+ __u32 index : 24; -+ __u32 unique : 2; -+ }; -+ __u32 v; -+ }; -+}; -+ -+/* -+ * VM bus messages return Windows' NTSTATUS, which is integer and only negative -+ * value indicates a failure. 
A positive number is a success and needs to be -+ * returned to user mode as the IOCTL return code. Negative status codes are -+ * converted to Linux error codes. -+ */ -+struct ntstatus { -+ union { -+ struct { -+ int code : 16; -+ int facility : 13; -+ int customer : 1; -+ int severity : 2; -+ }; -+ int v; -+ }; -+}; -+ - /* - * Matches the Windows LUID definition. - * LUID is a locally unique identifier (similar to GUID, but not global), --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1671-drivers-hv-dxgkrnl-Creation-of-dxgadapter-object.patch b/patch/kernel/archive/wsl2-arm64-6.1/1671-drivers-hv-dxgkrnl-Creation-of-dxgadapter-object.patch deleted file mode 100644 index 90df8654ad1b..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1671-drivers-hv-dxgkrnl-Creation-of-dxgadapter-object.patch +++ /dev/null @@ -1,1160 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 15 Feb 2022 19:00:38 -0800 -Subject: drivers: hv: dxgkrnl: Creation of dxgadapter object - -Handle creation and destruction of dxgadapter object, which -represents a virtual compute device, projected to the VM by -the host. The dxgadapter object is created when the -corresponding VMBus channel is offered by Hyper-V. - -There could be multiple virtual compute device objects, projected -by the host to VM. They are enumerated by issuing IOCTLs to -the /dev/dxg device. - -The adapter object can start functioning only when the global VMBus -channel and the corresponding per device VMBus channel are -initialized. Notifications about arrival of a virtual compute PCI -device and VMBus channels can happen in any order. Therefore, -the initial dxgadapter object state is DXGADAPTER_STATE_WAITING_VMBUS. -A list of VMBus channels and a list of waiting dxgadapter objects -are maintained. When dxgkrnl is notified about a VMBus channel -arrival, if tries to start all adapters, which are not started yet. 
- -Properties of the adapter object are determined by sending VMBus -messages to the host to the corresponding VMBus channel. - -When the per virtual compute device VMBus channel or the global -channel are destroyed, the adapter object is destroyed. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/Makefile | 2 +- - drivers/hv/dxgkrnl/dxgadapter.c | 170 ++++++++ - drivers/hv/dxgkrnl/dxgkrnl.h | 85 ++++ - drivers/hv/dxgkrnl/dxgmodule.c | 204 ++++++++- - drivers/hv/dxgkrnl/dxgvmbus.c | 217 +++++++++- - drivers/hv/dxgkrnl/dxgvmbus.h | 128 ++++++ - drivers/hv/dxgkrnl/misc.c | 37 ++ - drivers/hv/dxgkrnl/misc.h | 24 +- - 8 files changed, 844 insertions(+), 23 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/Makefile b/drivers/hv/dxgkrnl/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/Makefile -+++ b/drivers/hv/dxgkrnl/Makefile -@@ -2,4 +2,4 @@ - # Makefile for the hyper-v compute device driver (dxgkrnl). - - obj-$(CONFIG_DXGKRNL) += dxgkrnl.o --dxgkrnl-y := dxgmodule.o dxgvmbus.o -+dxgkrnl-y := dxgmodule.o misc.o dxgadapter.o ioctl.o dxgvmbus.o -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -0,0 +1,170 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. 
-+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Implementation of dxgadapter and its objects -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "dxgkrnl.h" -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -+ -+int dxgadapter_set_vmbus(struct dxgadapter *adapter, struct hv_device *hdev) -+{ -+ int ret; -+ -+ guid_to_luid(&hdev->channel->offermsg.offer.if_instance, -+ &adapter->luid); -+ DXG_TRACE("%x:%x %p %pUb", -+ adapter->luid.b, adapter->luid.a, hdev->channel, -+ &hdev->channel->offermsg.offer.if_instance); -+ -+ ret = dxgvmbuschannel_init(&adapter->channel, hdev); -+ if (ret) -+ goto cleanup; -+ -+ adapter->channel.adapter = adapter; -+ adapter->hv_dev = hdev; -+ -+ ret = dxgvmb_send_open_adapter(adapter); -+ if (ret < 0) { -+ DXG_ERR("dxgvmb_send_open_adapter failed: %d", ret); -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_get_internal_adapter_info(adapter); -+ -+cleanup: -+ if (ret) -+ DXG_ERR("Failed to set vmbus: %d", ret); -+ return ret; -+} -+ -+void dxgadapter_start(struct dxgadapter *adapter) -+{ -+ struct dxgvgpuchannel *ch = NULL; -+ struct dxgvgpuchannel *entry; -+ int ret; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ DXG_TRACE("%x-%x", adapter->luid.a, adapter->luid.b); -+ -+ /* Find the corresponding vGPU vm bus channel */ -+ list_for_each_entry(entry, &dxgglobal->vgpu_ch_list_head, -+ vgpu_ch_list_entry) { -+ if (memcmp(&adapter->luid, -+ &entry->adapter_luid, -+ sizeof(struct winluid)) == 0) { -+ ch = entry; -+ break; -+ } -+ } -+ if (ch == NULL) { -+ DXG_TRACE("vGPU chanel is not ready"); -+ return; -+ } -+ -+ /* The global channel is initialized when the first adapter starts */ -+ if (!dxgglobal->global_channel_initialized) { -+ ret = dxgglobal_init_global_channel(); -+ if (ret) { -+ dxgglobal_destroy_global_channel(); -+ return; -+ } -+ dxgglobal->global_channel_initialized = true; -+ } -+ -+ /* Initialize vGPU vm bus channel */ -+ ret = dxgadapter_set_vmbus(adapter, ch->hdev); -+ if 
(ret) { -+ DXG_ERR("Failed to start adapter %p", adapter); -+ adapter->adapter_state = DXGADAPTER_STATE_STOPPED; -+ return; -+ } -+ -+ adapter->adapter_state = DXGADAPTER_STATE_ACTIVE; -+ DXG_TRACE("Adapter started %p", adapter); -+} -+ -+void dxgadapter_stop(struct dxgadapter *adapter) -+{ -+ bool adapter_stopped = false; -+ -+ down_write(&adapter->core_lock); -+ if (!adapter->stopping_adapter) -+ adapter->stopping_adapter = true; -+ else -+ adapter_stopped = true; -+ up_write(&adapter->core_lock); -+ -+ if (adapter_stopped) -+ return; -+ -+ if (dxgadapter_acquire_lock_exclusive(adapter) == 0) { -+ dxgvmb_send_close_adapter(adapter); -+ dxgadapter_release_lock_exclusive(adapter); -+ } -+ dxgvmbuschannel_destroy(&adapter->channel); -+ -+ adapter->adapter_state = DXGADAPTER_STATE_STOPPED; -+} -+ -+void dxgadapter_release(struct kref *refcount) -+{ -+ struct dxgadapter *adapter; -+ -+ adapter = container_of(refcount, struct dxgadapter, adapter_kref); -+ DXG_TRACE("%p", adapter); -+ kfree(adapter); -+} -+ -+bool dxgadapter_is_active(struct dxgadapter *adapter) -+{ -+ return adapter->adapter_state == DXGADAPTER_STATE_ACTIVE; -+} -+ -+int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter) -+{ -+ down_write(&adapter->core_lock); -+ if (adapter->adapter_state != DXGADAPTER_STATE_ACTIVE) { -+ dxgadapter_release_lock_exclusive(adapter); -+ return -ENODEV; -+ } -+ return 0; -+} -+ -+void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter) -+{ -+ down_write(&adapter->core_lock); -+} -+ -+void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter) -+{ -+ up_write(&adapter->core_lock); -+} -+ -+int dxgadapter_acquire_lock_shared(struct dxgadapter *adapter) -+{ -+ down_read(&adapter->core_lock); -+ if (adapter->adapter_state == DXGADAPTER_STATE_ACTIVE) -+ return 0; -+ dxgadapter_release_lock_shared(adapter); -+ return -ENODEV; -+} -+ -+void dxgadapter_release_lock_shared(struct dxgadapter *adapter) -+{ -+ up_read(&adapter->core_lock); -+} -diff --git 
a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -47,9 +47,39 @@ extern struct dxgdriver dxgdrv; - - #define DXGDEV dxgdrv.dxgdev - -+struct dxgk_device_types { -+ u32 post_device:1; -+ u32 post_device_certain:1; -+ u32 software_device:1; -+ u32 soft_gpu_device:1; -+ u32 warp_device:1; -+ u32 bdd_device:1; -+ u32 support_miracast:1; -+ u32 mismatched_lda:1; -+ u32 indirect_display_device:1; -+ u32 xbox_one_device:1; -+ u32 child_id_support_dwm_clone:1; -+ u32 child_id_support_dwm_clone2:1; -+ u32 has_internal_panel:1; -+ u32 rfx_vgpu_device:1; -+ u32 virtual_render_device:1; -+ u32 support_preserve_boot_display:1; -+ u32 is_uefi_frame_buffer:1; -+ u32 removable_device:1; -+ u32 virtual_monitor_device:1; -+}; -+ -+enum dxgobjectstate { -+ DXGOBJECTSTATE_CREATED, -+ DXGOBJECTSTATE_ACTIVE, -+ DXGOBJECTSTATE_STOPPED, -+ DXGOBJECTSTATE_DESTROYED, -+}; -+ - struct dxgvmbuschannel { - struct vmbus_channel *channel; - struct hv_device *hdev; -+ struct dxgadapter *adapter; - spinlock_t packet_list_mutex; - struct list_head packet_list_head; - struct kmem_cache *packet_cache; -@@ -81,6 +111,10 @@ struct dxgglobal { - struct miscdevice dxgdevice; - struct mutex device_mutex; - -+ /* list of created adapters */ -+ struct list_head adapter_list_head; -+ struct rw_semaphore adapter_list_lock; -+ - /* - * List of the vGPU VM bus channels (dxgvgpuchannel) - * Protected by device_mutex -@@ -102,6 +136,10 @@ static inline struct dxgglobal *dxggbl(void) - return dxgdrv.dxgglobal; - } - -+int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, -+ struct winluid host_vgpu_luid); -+void dxgglobal_acquire_adapter_list_lock(enum dxglockstate state); -+void dxgglobal_release_adapter_list_lock(enum dxglockstate state); - int dxgglobal_init_global_channel(void); - void dxgglobal_destroy_global_channel(void); - struct vmbus_channel 
*dxgglobal_get_vmbus(void); -@@ -113,6 +151,47 @@ struct dxgprocess { - /* Placeholder */ - }; - -+enum dxgadapter_state { -+ DXGADAPTER_STATE_ACTIVE = 0, -+ DXGADAPTER_STATE_STOPPED = 1, -+ DXGADAPTER_STATE_WAITING_VMBUS = 2, -+}; -+ -+/* -+ * This object represents the grapchis adapter. -+ * Objects, which take reference on the adapter: -+ * - dxgglobal -+ * - adapter handle (struct d3dkmthandle) -+ */ -+struct dxgadapter { -+ struct rw_semaphore core_lock; -+ struct kref adapter_kref; -+ /* Entry in the list of adapters in dxgglobal */ -+ struct list_head adapter_list_entry; -+ struct pci_dev *pci_dev; -+ struct hv_device *hv_dev; -+ struct dxgvmbuschannel channel; -+ struct d3dkmthandle host_handle; -+ enum dxgadapter_state adapter_state; -+ struct winluid host_adapter_luid; -+ struct winluid host_vgpu_luid; -+ struct winluid luid; /* VM bus channel luid */ -+ u16 device_description[80]; -+ u16 device_instance_id[WIN_MAX_PATH]; -+ bool stopping_adapter; -+}; -+ -+int dxgadapter_set_vmbus(struct dxgadapter *adapter, struct hv_device *hdev); -+bool dxgadapter_is_active(struct dxgadapter *adapter); -+void dxgadapter_start(struct dxgadapter *adapter); -+void dxgadapter_stop(struct dxgadapter *adapter); -+void dxgadapter_release(struct kref *refcount); -+int dxgadapter_acquire_lock_shared(struct dxgadapter *adapter); -+void dxgadapter_release_lock_shared(struct dxgadapter *adapter); -+int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter); -+void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter); -+void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter); -+ - /* - * The convention is that VNBus instance id is a GUID, but the host sets - * the lower part of the value to the host adapter LUID. 
The function -@@ -141,6 +220,12 @@ static inline void guid_to_luid(guid_t *guid, struct winluid *luid) - - void dxgvmb_initialize(void); - int dxgvmb_send_set_iospace_region(u64 start, u64 len); -+int dxgvmb_send_open_adapter(struct dxgadapter *adapter); -+int dxgvmb_send_close_adapter(struct dxgadapter *adapter); -+int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter); -+int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel, -+ void *command, -+ u32 cmd_size); - - int ntstatus2int(struct ntstatus status); - -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -55,6 +55,156 @@ void dxgglobal_release_channel_lock(void) - up_read(&dxggbl()->channel_lock); - } - -+void dxgglobal_acquire_adapter_list_lock(enum dxglockstate state) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (state == DXGLOCK_EXCL) -+ down_write(&dxgglobal->adapter_list_lock); -+ else -+ down_read(&dxgglobal->adapter_list_lock); -+} -+ -+void dxgglobal_release_adapter_list_lock(enum dxglockstate state) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (state == DXGLOCK_EXCL) -+ up_write(&dxgglobal->adapter_list_lock); -+ else -+ up_read(&dxgglobal->adapter_list_lock); -+} -+ -+/* -+ * Returns a pointer to dxgadapter object, which corresponds to the given PCI -+ * device, or NULL. 
-+ */ -+static struct dxgadapter *find_pci_adapter(struct pci_dev *dev) -+{ -+ struct dxgadapter *entry; -+ struct dxgadapter *adapter = NULL; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL); -+ -+ list_for_each_entry(entry, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (dev == entry->pci_dev) { -+ adapter = entry; -+ break; -+ } -+ } -+ -+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); -+ return adapter; -+} -+ -+/* -+ * Returns a pointer to dxgadapter object, which has the givel LUID -+ * device, or NULL. -+ */ -+static struct dxgadapter *find_adapter(struct winluid *luid) -+{ -+ struct dxgadapter *entry; -+ struct dxgadapter *adapter = NULL; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL); -+ -+ list_for_each_entry(entry, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (memcmp(luid, &entry->luid, sizeof(struct winluid)) == 0) { -+ adapter = entry; -+ break; -+ } -+ } -+ -+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); -+ return adapter; -+} -+ -+/* -+ * Creates a new dxgadapter object, which represents a virtual GPU, projected -+ * by the host. -+ * The adapter is in the waiting state. It will become active when the global -+ * VM bus channel and the adapter VM bus channel are created. 
-+ */ -+int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, -+ struct winluid host_vgpu_luid) -+{ -+ struct dxgadapter *adapter; -+ int ret = 0; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ adapter = kzalloc(sizeof(struct dxgadapter), GFP_KERNEL); -+ if (adapter == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ adapter->adapter_state = DXGADAPTER_STATE_WAITING_VMBUS; -+ adapter->host_vgpu_luid = host_vgpu_luid; -+ kref_init(&adapter->adapter_kref); -+ init_rwsem(&adapter->core_lock); -+ -+ adapter->pci_dev = dev; -+ guid_to_luid(guid, &adapter->luid); -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL); -+ -+ list_add_tail(&adapter->adapter_list_entry, -+ &dxgglobal->adapter_list_head); -+ dxgglobal->num_adapters++; -+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); -+ -+ DXG_TRACE("new adapter added %p %x-%x", adapter, -+ adapter->luid.a, adapter->luid.b); -+cleanup: -+ return ret; -+} -+ -+/* -+ * Attempts to start dxgadapter objects, which are not active yet. -+ */ -+static void dxgglobal_start_adapters(void) -+{ -+ struct dxgadapter *adapter; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (dxgglobal->hdev == NULL) { -+ DXG_TRACE("Global channel is not ready"); -+ return; -+ } -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL); -+ list_for_each_entry(adapter, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (adapter->adapter_state == DXGADAPTER_STATE_WAITING_VMBUS) -+ dxgadapter_start(adapter); -+ } -+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); -+} -+ -+/* -+ * Stopsthe active dxgadapter objects. 
-+ */ -+static void dxgglobal_stop_adapters(void) -+{ -+ struct dxgadapter *adapter; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (dxgglobal->hdev == NULL) { -+ DXG_TRACE("Global channel is not ready"); -+ return; -+ } -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL); -+ list_for_each_entry(adapter, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (adapter->adapter_state == DXGADAPTER_STATE_ACTIVE) -+ dxgadapter_stop(adapter); -+ } -+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); -+} -+ - const struct file_operations dxgk_fops = { - .owner = THIS_MODULE, - }; -@@ -182,6 +332,15 @@ static int dxg_pci_probe_device(struct pci_dev *dev, - DXG_TRACE("Vmbus interface version: %d", dxgglobal->vmbus_ver); - DXG_TRACE("Host luid: %x-%x", vgpu_luid.b, vgpu_luid.a); - -+ /* Create new virtual GPU adapter */ -+ ret = dxgglobal_create_adapter(dev, &guid, vgpu_luid); -+ if (ret) -+ goto cleanup; -+ -+ /* Attempt to start the adapter in case VM bus channels are created */ -+ -+ dxgglobal_start_adapters(); -+ - cleanup: - - mutex_unlock(&dxgglobal->device_mutex); -@@ -193,7 +352,25 @@ static int dxg_pci_probe_device(struct pci_dev *dev, - - static void dxg_pci_remove_device(struct pci_dev *dev) - { -- /* Placeholder */ -+ struct dxgadapter *adapter; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ mutex_lock(&dxgglobal->device_mutex); -+ -+ adapter = find_pci_adapter(dev); -+ if (adapter) { -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL); -+ list_del(&adapter->adapter_list_entry); -+ dxgglobal->num_adapters--; -+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); -+ -+ dxgadapter_stop(adapter); -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ } else { -+ DXG_ERR("Failed to find dxgadapter for pcidev"); -+ } -+ -+ mutex_unlock(&dxgglobal->device_mutex); - } - - static struct pci_device_id dxg_pci_id_table[] = { -@@ -297,6 +474,25 @@ void dxgglobal_destroy_global_channel(void) - up_write(&dxgglobal->channel_lock); - } - -+static void 
dxgglobal_stop_adapter_vmbus(struct hv_device *hdev) -+{ -+ struct dxgadapter *adapter = NULL; -+ struct winluid luid; -+ -+ guid_to_luid(&hdev->channel->offermsg.offer.if_instance, &luid); -+ -+ DXG_TRACE("Stopping adapter %x:%x", luid.b, luid.a); -+ -+ adapter = find_adapter(&luid); -+ -+ if (adapter && adapter->adapter_state == DXGADAPTER_STATE_ACTIVE) { -+ down_write(&adapter->core_lock); -+ dxgvmbuschannel_destroy(&adapter->channel); -+ adapter->adapter_state = DXGADAPTER_STATE_STOPPED; -+ up_write(&adapter->core_lock); -+ } -+} -+ - static const struct hv_vmbus_device_id dxg_vmbus_id_table[] = { - /* Per GPU Device GUID */ - { HV_GPUP_DXGK_VGPU_GUID }, -@@ -329,6 +525,7 @@ static int dxg_probe_vmbus(struct hv_device *hdev, - vgpuch->hdev = hdev; - list_add_tail(&vgpuch->vgpu_ch_list_entry, - &dxgglobal->vgpu_ch_list_head); -+ dxgglobal_start_adapters(); - } else if (uuid_le_cmp(hdev->dev_type, - dxg_vmbus_id_table[1].guid) == 0) { - /* This is the global Dxgkgnl channel */ -@@ -341,6 +538,7 @@ static int dxg_probe_vmbus(struct hv_device *hdev, - goto error; - } - dxgglobal->hdev = hdev; -+ dxgglobal_start_adapters(); - } else { - /* Unknown device type */ - DXG_ERR("Unknown VM bus device type"); -@@ -364,6 +562,7 @@ static int dxg_remove_vmbus(struct hv_device *hdev) - - if (uuid_le_cmp(hdev->dev_type, dxg_vmbus_id_table[0].guid) == 0) { - DXG_TRACE("Remove virtual GPU channel"); -+ dxgglobal_stop_adapter_vmbus(hdev); - list_for_each_entry(vgpu_channel, - &dxgglobal->vgpu_ch_list_head, - vgpu_ch_list_entry) { -@@ -420,6 +619,8 @@ static struct dxgglobal *dxgglobal_create(void) - mutex_init(&dxgglobal->device_mutex); - - INIT_LIST_HEAD(&dxgglobal->vgpu_ch_list_head); -+ INIT_LIST_HEAD(&dxgglobal->adapter_list_head); -+ init_rwsem(&dxgglobal->adapter_list_lock); - - init_rwsem(&dxgglobal->channel_lock); - -@@ -430,6 +631,7 @@ static void dxgglobal_destroy(struct dxgglobal *dxgglobal) - { - if (dxgglobal) { - mutex_lock(&dxgglobal->device_mutex); -+ 
dxgglobal_stop_adapters(); - dxgglobal_destroy_global_channel(); - mutex_unlock(&dxgglobal->device_mutex); - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -77,7 +77,7 @@ struct dxgvmbusmsgres { - void *res; - }; - --static int init_message(struct dxgvmbusmsg *msg, -+static int init_message(struct dxgvmbusmsg *msg, struct dxgadapter *adapter, - struct dxgprocess *process, u32 size) - { - struct dxgglobal *dxgglobal = dxggbl(); -@@ -99,10 +99,15 @@ static int init_message(struct dxgvmbusmsg *msg, - if (use_ext_header) { - msg->msg = (char *)&msg->hdr[1]; - msg->hdr->command_offset = sizeof(msg->hdr[0]); -+ if (adapter) -+ msg->hdr->vgpu_luid = adapter->host_vgpu_luid; - } else { - msg->msg = (char *)msg->hdr; - } -- msg->channel = &dxgglobal->channel; -+ if (adapter && !dxgglobal->async_msg_enabled) -+ msg->channel = &adapter->channel; -+ else -+ msg->channel = &dxgglobal->channel; - return 0; - } - -@@ -116,6 +121,37 @@ static void free_message(struct dxgvmbusmsg *msg, struct dxgprocess *process) - * Helper functions - */ - -+static void command_vm_to_host_init2(struct dxgkvmb_command_vm_to_host *command, -+ enum dxgkvmb_commandtype_global t, -+ struct d3dkmthandle process) -+{ -+ command->command_type = t; -+ command->process = process; -+ command->command_id = 0; -+ command->channel_type = DXGKVMB_VM_TO_HOST; -+} -+ -+static void command_vgpu_to_host_init1(struct dxgkvmb_command_vgpu_to_host -+ *command, -+ enum dxgkvmb_commandtype type) -+{ -+ command->command_type = type; -+ command->process.v = 0; -+ command->command_id = 0; -+ command->channel_type = DXGKVMB_VGPU_TO_HOST; -+} -+ -+static void command_vgpu_to_host_init2(struct dxgkvmb_command_vgpu_to_host -+ *command, -+ enum dxgkvmb_commandtype type, -+ struct d3dkmthandle process) -+{ -+ command->command_type = type; -+ command->process = process; -+ 
command->command_id = 0; -+ command->channel_type = DXGKVMB_VGPU_TO_HOST; -+} -+ - int ntstatus2int(struct ntstatus status) - { - if (NT_SUCCESS(status)) -@@ -216,22 +252,26 @@ static void process_inband_packet(struct dxgvmbuschannel *channel, - u32 packet_length = hv_pkt_datalen(desc); - struct dxgkvmb_command_host_to_vm *packet; - -- if (packet_length < sizeof(struct dxgkvmb_command_host_to_vm)) { -- DXG_ERR("Invalid global packet"); -- } else { -- packet = hv_pkt_data(desc); -- DXG_TRACE("global packet %d", -- packet->command_type); -- switch (packet->command_type) { -- case DXGK_VMBCOMMAND_SIGNALGUESTEVENT: -- case DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE: -- break; -- case DXGK_VMBCOMMAND_SENDWNFNOTIFICATION: -- break; -- default: -- DXG_ERR("unexpected host message %d", -+ if (channel->adapter == NULL) { -+ if (packet_length < sizeof(struct dxgkvmb_command_host_to_vm)) { -+ DXG_ERR("Invalid global packet"); -+ } else { -+ packet = hv_pkt_data(desc); -+ DXG_TRACE("global packet %d", - packet->command_type); -+ switch (packet->command_type) { -+ case DXGK_VMBCOMMAND_SIGNALGUESTEVENT: -+ case DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE: -+ break; -+ case DXGK_VMBCOMMAND_SENDWNFNOTIFICATION: -+ break; -+ default: -+ DXG_ERR("unexpected host message %d", -+ packet->command_type); -+ } - } -+ } else { -+ DXG_ERR("Unexpected packet for adapter channel"); - } - } - -@@ -279,6 +319,7 @@ void dxgvmbuschannel_receive(void *ctx) - struct vmpacket_descriptor *desc; - u32 packet_length = 0; - -+ DXG_TRACE("New adapter message: %p", channel->adapter); - foreach_vmbus_pkt(desc, channel->channel) { - packet_length = hv_pkt_datalen(desc); - DXG_TRACE("next packet (id, size, type): %llu %d %d", -@@ -302,6 +343,8 @@ int dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel, - { - int ret; - struct dxgvmbuspacket *packet = NULL; -+ struct dxgkvmb_command_vm_to_host *cmd1; -+ struct dxgkvmb_command_vgpu_to_host *cmd2; - - if (cmd_size > DXG_MAX_VM_BUS_PACKET_SIZE || - result_size > 
DXG_MAX_VM_BUS_PACKET_SIZE) { -@@ -315,6 +358,16 @@ int dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel, - return -ENOMEM; - } - -+ if (channel->adapter == NULL) { -+ cmd1 = command; -+ DXG_TRACE("send_sync_msg global: %d %p %d %d", -+ cmd1->command_type, command, cmd_size, result_size); -+ } else { -+ cmd2 = command; -+ DXG_TRACE("send_sync_msg adapter: %d %p %d %d", -+ cmd2->command_type, command, cmd_size, result_size); -+ } -+ - packet->request_id = atomic64_inc_return(&channel->packet_request_id); - init_completion(&packet->wait); - packet->buffer = result; -@@ -358,6 +411,41 @@ int dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel, - return ret; - } - -+int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel, -+ void *command, -+ u32 cmd_size) -+{ -+ int ret; -+ int try_count = 0; -+ -+ if (cmd_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("%s invalid data size", __func__); -+ return -EINVAL; -+ } -+ -+ if (channel->adapter) { -+ DXG_ERR("Async message sent to the adapter channel"); -+ return -EINVAL; -+ } -+ -+ do { -+ ret = vmbus_sendpacket(channel->channel, command, cmd_size, -+ 0, VM_PKT_DATA_INBAND, 0); -+ /* -+ * -EAGAIN is returned when the VM bus ring buffer if full. -+ * Wait 2ms to allow the host to process messages and try again. 
-+ */ -+ if (ret == -EAGAIN) { -+ usleep_range(1000, 2000); -+ try_count++; -+ } -+ } while (ret == -EAGAIN && try_count < 5000); -+ if (ret < 0) -+ DXG_ERR("vmbus_sendpacket failed: %x", ret); -+ -+ return ret; -+} -+ - static int - dxgvmb_send_sync_msg_ntstatus(struct dxgvmbuschannel *channel, - void *command, u32 cmd_size) -@@ -383,7 +471,7 @@ int dxgvmb_send_set_iospace_region(u64 start, u64 len) - struct dxgvmbusmsg msg; - struct dxgglobal *dxgglobal = dxggbl(); - -- ret = init_message(&msg, NULL, sizeof(*command)); -+ ret = init_message(&msg, NULL, NULL, sizeof(*command)); - if (ret) - return ret; - command = (void *)msg.msg; -@@ -408,3 +496,98 @@ int dxgvmb_send_set_iospace_region(u64 start, u64 len) - DXG_TRACE("Error: %d", ret); - return ret; - } -+ -+/* -+ * Virtual GPU messages to the host -+ */ -+ -+int dxgvmb_send_open_adapter(struct dxgadapter *adapter) -+{ -+ int ret; -+ struct dxgkvmb_command_openadapter *command; -+ struct dxgkvmb_command_openadapter_return result = { }; -+ struct dxgvmbusmsg msg; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, adapter, NULL, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init1(&command->hdr, DXGK_VMBCOMMAND_OPENADAPTER); -+ command->vmbus_interface_version = dxgglobal->vmbus_ver; -+ command->vmbus_last_compatible_interface_version = -+ DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result.status); -+ adapter->host_handle = result.host_adapter_handle; -+ -+cleanup: -+ free_message(&msg, NULL); -+ if (ret) -+ DXG_ERR("Failed to open adapter: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_close_adapter(struct dxgadapter *adapter) -+{ -+ int ret; -+ struct dxgkvmb_command_closeadapter *command; -+ struct dxgvmbusmsg msg; -+ -+ ret = init_message(&msg, adapter, NULL, sizeof(*command)); 
-+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init1(&command->hdr, DXGK_VMBCOMMAND_CLOSEADAPTER); -+ command->host_handle = adapter->host_handle; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ NULL, 0); -+ free_message(&msg, NULL); -+ if (ret) -+ DXG_ERR("Failed to close adapter: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter) -+{ -+ int ret; -+ struct dxgkvmb_command_getinternaladapterinfo *command; -+ struct dxgkvmb_command_getinternaladapterinfo_return result = { }; -+ struct dxgvmbusmsg msg; -+ u32 result_size = sizeof(result); -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, adapter, NULL, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init1(&command->hdr, -+ DXGK_VMBCOMMAND_GETINTERNALADAPTERINFO); -+ if (dxgglobal->vmbus_ver < DXGK_VMBUS_INTERFACE_VERSION) -+ result_size -= sizeof(struct winluid); -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, result_size); -+ if (ret >= 0) { -+ adapter->host_adapter_luid = result.host_adapter_luid; -+ adapter->host_vgpu_luid = result.host_vgpu_luid; -+ wcsncpy(adapter->device_description, result.device_description, -+ sizeof(adapter->device_description) / sizeof(u16)); -+ wcsncpy(adapter->device_instance_id, result.device_instance_id, -+ sizeof(adapter->device_instance_id) / sizeof(u16)); -+ dxgglobal->async_msg_enabled = result.async_msg_enabled != 0; -+ } -+ free_message(&msg, NULL); -+ if (ret) -+ DXG_ERR("Failed to get adapter info: %d", ret); -+ return ret; -+} -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -47,6 +47,83 @@ enum dxgkvmb_commandtype_global { - DXGK_VMBCOMMAND_INVALID_VM_TO_HOST - }; - -+/* -+ * -+ * Commands, sent to the host via the 
per adapter VM bus channel -+ * DXG_GUEST_VGPU_VMBUS -+ * -+ */ -+ -+enum dxgkvmb_commandtype { -+ DXGK_VMBCOMMAND_CREATEDEVICE = 0, -+ DXGK_VMBCOMMAND_DESTROYDEVICE = 1, -+ DXGK_VMBCOMMAND_QUERYADAPTERINFO = 2, -+ DXGK_VMBCOMMAND_DDIQUERYADAPTERINFO = 3, -+ DXGK_VMBCOMMAND_CREATEALLOCATION = 4, -+ DXGK_VMBCOMMAND_DESTROYALLOCATION = 5, -+ DXGK_VMBCOMMAND_CREATECONTEXTVIRTUAL = 6, -+ DXGK_VMBCOMMAND_DESTROYCONTEXT = 7, -+ DXGK_VMBCOMMAND_CREATESYNCOBJECT = 8, -+ DXGK_VMBCOMMAND_CREATEPAGINGQUEUE = 9, -+ DXGK_VMBCOMMAND_DESTROYPAGINGQUEUE = 10, -+ DXGK_VMBCOMMAND_MAKERESIDENT = 11, -+ DXGK_VMBCOMMAND_EVICT = 12, -+ DXGK_VMBCOMMAND_ESCAPE = 13, -+ DXGK_VMBCOMMAND_OPENADAPTER = 14, -+ DXGK_VMBCOMMAND_CLOSEADAPTER = 15, -+ DXGK_VMBCOMMAND_FREEGPUVIRTUALADDRESS = 16, -+ DXGK_VMBCOMMAND_MAPGPUVIRTUALADDRESS = 17, -+ DXGK_VMBCOMMAND_RESERVEGPUVIRTUALADDRESS = 18, -+ DXGK_VMBCOMMAND_UPDATEGPUVIRTUALADDRESS = 19, -+ DXGK_VMBCOMMAND_SUBMITCOMMAND = 20, -+ dxgk_vmbcommand_queryvideomemoryinfo = 21, -+ DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMCPU = 22, -+ DXGK_VMBCOMMAND_LOCK2 = 23, -+ DXGK_VMBCOMMAND_UNLOCK2 = 24, -+ DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMGPU = 25, -+ DXGK_VMBCOMMAND_SIGNALSYNCOBJECT = 26, -+ DXGK_VMBCOMMAND_SIGNALFENCENTSHAREDBYREF = 27, -+ DXGK_VMBCOMMAND_GETDEVICESTATE = 28, -+ DXGK_VMBCOMMAND_MARKDEVICEASERROR = 29, -+ DXGK_VMBCOMMAND_ADAPTERSTOP = 30, -+ DXGK_VMBCOMMAND_SETQUEUEDLIMIT = 31, -+ DXGK_VMBCOMMAND_OPENRESOURCE = 32, -+ DXGK_VMBCOMMAND_SETCONTEXTSCHEDULINGPRIORITY = 33, -+ DXGK_VMBCOMMAND_PRESENTHISTORYTOKEN = 34, -+ DXGK_VMBCOMMAND_SETREDIRECTEDFLIPFENCEVALUE = 35, -+ DXGK_VMBCOMMAND_GETINTERNALADAPTERINFO = 36, -+ DXGK_VMBCOMMAND_FLUSHHEAPTRANSITIONS = 37, -+ DXGK_VMBCOMMAND_BLT = 38, -+ DXGK_VMBCOMMAND_DDIGETSTANDARDALLOCATIONDRIVERDATA = 39, -+ DXGK_VMBCOMMAND_CDDGDICOMMAND = 40, -+ DXGK_VMBCOMMAND_QUERYALLOCATIONRESIDENCY = 41, -+ DXGK_VMBCOMMAND_FLUSHDEVICE = 42, -+ DXGK_VMBCOMMAND_FLUSHADAPTER = 43, -+ DXGK_VMBCOMMAND_DDIGETNODEMETADATA = 44, 
-+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE = 45, -+ DXGK_VMBCOMMAND_ISSYNCOBJECTSIGNALED = 46, -+ DXGK_VMBCOMMAND_CDDSYNCGPUACCESS = 47, -+ DXGK_VMBCOMMAND_QUERYSTATISTICS = 48, -+ DXGK_VMBCOMMAND_CHANGEVIDEOMEMORYRESERVATION = 49, -+ DXGK_VMBCOMMAND_CREATEHWQUEUE = 50, -+ DXGK_VMBCOMMAND_DESTROYHWQUEUE = 51, -+ DXGK_VMBCOMMAND_SUBMITCOMMANDTOHWQUEUE = 52, -+ DXGK_VMBCOMMAND_GETDRIVERSTOREFILE = 53, -+ DXGK_VMBCOMMAND_READDRIVERSTOREFILE = 54, -+ DXGK_VMBCOMMAND_GETNEXTHARDLINK = 55, -+ DXGK_VMBCOMMAND_UPDATEALLOCATIONPROPERTY = 56, -+ DXGK_VMBCOMMAND_OFFERALLOCATIONS = 57, -+ DXGK_VMBCOMMAND_RECLAIMALLOCATIONS = 58, -+ DXGK_VMBCOMMAND_SETALLOCATIONPRIORITY = 59, -+ DXGK_VMBCOMMAND_GETALLOCATIONPRIORITY = 60, -+ DXGK_VMBCOMMAND_GETCONTEXTSCHEDULINGPRIORITY = 61, -+ DXGK_VMBCOMMAND_QUERYCLOCKCALIBRATION = 62, -+ DXGK_VMBCOMMAND_QUERYRESOURCEINFO = 64, -+ DXGK_VMBCOMMAND_LOGEVENT = 65, -+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMPAGES = 66, -+ DXGK_VMBCOMMAND_INVALID -+}; -+ - /* - * Commands, sent by the host to the VM - */ -@@ -66,6 +143,15 @@ struct dxgkvmb_command_vm_to_host { - enum dxgkvmb_commandtype_global command_type; - }; - -+struct dxgkvmb_command_vgpu_to_host { -+ u64 command_id; -+ struct d3dkmthandle process; -+ u32 channel_type : 8; -+ u32 async_msg : 1; -+ u32 reserved : 23; -+ enum dxgkvmb_commandtype command_type; -+}; -+ - struct dxgkvmb_command_host_to_vm { - u64 command_id; - struct d3dkmthandle process; -@@ -83,4 +169,46 @@ struct dxgkvmb_command_setiospaceregion { - u32 shared_page_gpadl; - }; - -+struct dxgkvmb_command_openadapter { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ u32 vmbus_interface_version; -+ u32 vmbus_last_compatible_interface_version; -+ struct winluid guest_adapter_luid; -+}; -+ -+struct dxgkvmb_command_openadapter_return { -+ struct d3dkmthandle host_adapter_handle; -+ struct ntstatus status; -+ u32 vmbus_interface_version; -+ u32 vmbus_last_compatible_interface_version; -+}; -+ -+struct dxgkvmb_command_closeadapter { -+ struct 
dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle host_handle; -+}; -+ -+struct dxgkvmb_command_getinternaladapterinfo { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+}; -+ -+struct dxgkvmb_command_getinternaladapterinfo_return { -+ struct dxgk_device_types device_types; -+ u32 driver_store_copy_mode; -+ u32 driver_ddi_version; -+ u32 secure_virtual_machine : 1; -+ u32 virtual_machine_reset : 1; -+ u32 is_vail_supported : 1; -+ u32 hw_sch_enabled : 1; -+ u32 hw_sch_capable : 1; -+ u32 va_backed_vm : 1; -+ u32 async_msg_enabled : 1; -+ u32 hw_support_state : 2; -+ u32 reserved : 23; -+ struct winluid host_adapter_luid; -+ u16 device_description[80]; -+ u16 device_instance_id[WIN_MAX_PATH]; -+ struct winluid host_vgpu_luid; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/misc.c b/drivers/hv/dxgkrnl/misc.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/misc.c -@@ -0,0 +1,37 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2019, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Helper functions -+ * -+ */ -+ -+#include -+#include -+#include -+ -+#include "dxgkrnl.h" -+#include "misc.h" -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -+ -+u16 *wcsncpy(u16 *dest, const u16 *src, size_t n) -+{ -+ int i; -+ -+ for (i = 0; i < n; i++) { -+ dest[i] = src[i]; -+ if (src[i] == 0) { -+ i++; -+ break; -+ } -+ } -+ dest[i - 1] = 0; -+ return dest; -+} -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -14,18 +14,34 @@ - #ifndef _MISC_H_ - #define _MISC_H_ - -+/* Max characters in Windows path */ -+#define WIN_MAX_PATH 260 -+ - extern const struct d3dkmthandle zerohandle; - - /* - * Synchronization lock hierarchy. - * -- * The higher enum value, the higher is the lock order. 
-- * When a lower lock ois held, the higher lock should not be acquired. -+ * The locks here are in the order from lowest to highest. -+ * When a lower lock is held, the higher lock should not be acquired. - * -- * channel_lock -- * device_mutex -+ * channel_lock (VMBus channel lock) -+ * fd_mutex -+ * plistmutex (process list mutex) -+ * table_lock (handle table lock) -+ * core_lock (dxgadapter lock) -+ * device_lock (dxgdevice lock) -+ * adapter_list_lock -+ * device_mutex (dxgglobal mutex) - */ - -+u16 *wcsncpy(u16 *dest, const u16 *src, size_t n); -+ -+enum dxglockstate { -+ DXGLOCK_SHARED, -+ DXGLOCK_EXCL -+}; -+ - /* - * Some of the Windows return codes, which needs to be translated to Linux - * IOCTL return codes. Positive values are success codes and need to be --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1672-drivers-hv-dxgkrnl-Opening-of-dev-dxg-device-and-dxgprocess-creation.patch b/patch/kernel/archive/wsl2-arm64-6.1/1672-drivers-hv-dxgkrnl-Opening-of-dev-dxg-device-and-dxgprocess-creation.patch deleted file mode 100644 index b1de83540952..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1672-drivers-hv-dxgkrnl-Opening-of-dev-dxg-device-and-dxgprocess-creation.patch +++ /dev/null @@ -1,1847 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 15 Feb 2022 19:12:48 -0800 -Subject: drivers: hv: dxgkrnl: Opening of /dev/dxg device and dxgprocess - creation - -- Implement opening of the device (/dev/dxg) file object and creation of -dxgprocess objects. - -- Add VM bus messages to create and destroy the host side of a dxgprocess -object. - -- Implement the handle manager, which manages d3dkmthandle handles -for the internal process objects. The handles are used by a user mode -client to reference dxgkrnl objects. - -dxgprocess is created for each process, which opens /dev/dxg. 
-dxgprocess is ref counted, so the existing dxgprocess objects is used -for a process, which opens the device object multiple time. -dxgprocess is destroyed when the file object is released. - -A corresponding dxgprocess object is created on the host for every -dxgprocess object in the guest. - -When a dxgkrnl object is created, in most cases the corresponding -object is created in the host. The VM references the host objects by -handles (d3dkmthandle). d3dkmthandle values for a host object and -the corresponding VM object are the same. A host handle is allocated -first and its value is assigned to the guest object. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/Makefile | 2 +- - drivers/hv/dxgkrnl/dxgadapter.c | 72 ++ - drivers/hv/dxgkrnl/dxgkrnl.h | 95 +- - drivers/hv/dxgkrnl/dxgmodule.c | 97 ++ - drivers/hv/dxgkrnl/dxgprocess.c | 262 +++++ - drivers/hv/dxgkrnl/dxgvmbus.c | 164 +++ - drivers/hv/dxgkrnl/dxgvmbus.h | 36 + - drivers/hv/dxgkrnl/hmgr.c | 563 ++++++++++ - drivers/hv/dxgkrnl/hmgr.h | 112 ++ - drivers/hv/dxgkrnl/ioctl.c | 60 + - drivers/hv/dxgkrnl/misc.h | 9 +- - include/uapi/misc/d3dkmthk.h | 103 ++ - 12 files changed, 1569 insertions(+), 6 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/Makefile b/drivers/hv/dxgkrnl/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/Makefile -+++ b/drivers/hv/dxgkrnl/Makefile -@@ -2,4 +2,4 @@ - # Makefile for the hyper-v compute device driver (dxgkrnl). 
- - obj-$(CONFIG_DXGKRNL) += dxgkrnl.o --dxgkrnl-y := dxgmodule.o misc.o dxgadapter.o ioctl.o dxgvmbus.o -+dxgkrnl-y := dxgmodule.o hmgr.o misc.o dxgadapter.o ioctl.o dxgvmbus.o dxgprocess.o -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -100,6 +100,7 @@ void dxgadapter_start(struct dxgadapter *adapter) - - void dxgadapter_stop(struct dxgadapter *adapter) - { -+ struct dxgprocess_adapter *entry; - bool adapter_stopped = false; - - down_write(&adapter->core_lock); -@@ -112,6 +113,15 @@ void dxgadapter_stop(struct dxgadapter *adapter) - if (adapter_stopped) - return; - -+ dxgglobal_acquire_process_adapter_lock(); -+ -+ list_for_each_entry(entry, &adapter->adapter_process_list_head, -+ adapter_process_list_entry) { -+ dxgprocess_adapter_stop(entry); -+ } -+ -+ dxgglobal_release_process_adapter_lock(); -+ - if (dxgadapter_acquire_lock_exclusive(adapter) == 0) { - dxgvmb_send_close_adapter(adapter); - dxgadapter_release_lock_exclusive(adapter); -@@ -135,6 +145,21 @@ bool dxgadapter_is_active(struct dxgadapter *adapter) - return adapter->adapter_state == DXGADAPTER_STATE_ACTIVE; - } - -+/* Protected by dxgglobal_acquire_process_adapter_lock */ -+void dxgadapter_add_process(struct dxgadapter *adapter, -+ struct dxgprocess_adapter *process_info) -+{ -+ DXG_TRACE("%p %p", adapter, process_info); -+ list_add_tail(&process_info->adapter_process_list_entry, -+ &adapter->adapter_process_list_head); -+} -+ -+void dxgadapter_remove_process(struct dxgprocess_adapter *process_info) -+{ -+ DXG_TRACE("%p %p", process_info->adapter, process_info); -+ list_del(&process_info->adapter_process_list_entry); -+} -+ - int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter) - { - down_write(&adapter->core_lock); -@@ -168,3 +193,50 @@ void dxgadapter_release_lock_shared(struct dxgadapter *adapter) - { - 
up_read(&adapter->core_lock); - } -+ -+struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, -+ struct dxgadapter *adapter) -+{ -+ struct dxgprocess_adapter *adapter_info; -+ -+ adapter_info = kzalloc(sizeof(*adapter_info), GFP_KERNEL); -+ if (adapter_info) { -+ if (kref_get_unless_zero(&adapter->adapter_kref) == 0) { -+ DXG_ERR("failed to acquire adapter reference"); -+ goto cleanup; -+ } -+ adapter_info->adapter = adapter; -+ adapter_info->process = process; -+ adapter_info->refcount = 1; -+ list_add_tail(&adapter_info->process_adapter_list_entry, -+ &process->process_adapter_list_head); -+ dxgadapter_add_process(adapter, adapter_info); -+ } -+ return adapter_info; -+cleanup: -+ if (adapter_info) -+ kfree(adapter_info); -+ return NULL; -+} -+ -+void dxgprocess_adapter_stop(struct dxgprocess_adapter *adapter_info) -+{ -+} -+ -+void dxgprocess_adapter_destroy(struct dxgprocess_adapter *adapter_info) -+{ -+ dxgadapter_remove_process(adapter_info); -+ kref_put(&adapter_info->adapter->adapter_kref, dxgadapter_release); -+ list_del(&adapter_info->process_adapter_list_entry); -+ kfree(adapter_info); -+} -+ -+/* -+ * Must be called when dxgglobal::process_adapter_mutex is held -+ */ -+void dxgprocess_adapter_release(struct dxgprocess_adapter *adapter_info) -+{ -+ adapter_info->refcount--; -+ if (adapter_info->refcount == 0) -+ dxgprocess_adapter_destroy(adapter_info); -+} -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -29,8 +29,10 @@ - #include - #include - #include "misc.h" -+#include "hmgr.h" - #include - -+struct dxgprocess; - struct dxgadapter; - - /* -@@ -111,6 +113,10 @@ struct dxgglobal { - struct miscdevice dxgdevice; - struct mutex device_mutex; - -+ /* list of created processes */ -+ struct list_head plisthead; -+ struct mutex plistmutex; -+ - /* list of created adapters */ - struct list_head 
adapter_list_head; - struct rw_semaphore adapter_list_lock; -@@ -124,6 +130,9 @@ struct dxgglobal { - /* protects acces to the global VM bus channel */ - struct rw_semaphore channel_lock; - -+ /* protects the dxgprocess_adapter lists */ -+ struct mutex process_adapter_mutex; -+ - bool global_channel_initialized; - bool async_msg_enabled; - bool misc_registered; -@@ -144,13 +153,84 @@ int dxgglobal_init_global_channel(void); - void dxgglobal_destroy_global_channel(void); - struct vmbus_channel *dxgglobal_get_vmbus(void); - struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void); -+void dxgglobal_acquire_process_adapter_lock(void); -+void dxgglobal_release_process_adapter_lock(void); - int dxgglobal_acquire_channel_lock(void); - void dxgglobal_release_channel_lock(void); - -+/* -+ * Describes adapter information for each process -+ */ -+struct dxgprocess_adapter { -+ /* Entry in dxgadapter::adapter_process_list_head */ -+ struct list_head adapter_process_list_entry; -+ /* Entry in dxgprocess::process_adapter_list_head */ -+ struct list_head process_adapter_list_entry; -+ struct dxgadapter *adapter; -+ struct dxgprocess *process; -+ int refcount; -+}; -+ -+struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, -+ struct dxgadapter -+ *adapter); -+void dxgprocess_adapter_release(struct dxgprocess_adapter *adapter); -+void dxgprocess_adapter_stop(struct dxgprocess_adapter *adapter_info); -+void dxgprocess_adapter_destroy(struct dxgprocess_adapter *adapter_info); -+ -+/* -+ * The structure represents a process, which opened the /dev/dxg device. -+ * A corresponding object is created on the host. -+ */ - struct dxgprocess { -- /* Placeholder */ -+ /* -+ * Process list entry in dxgglobal. -+ * Protected by the dxgglobal->plistmutex. 
-+ */ -+ struct list_head plistentry; -+ pid_t pid; -+ pid_t tgid; -+ /* how many time the process was opened */ -+ struct kref process_kref; -+ /* -+ * This handle table is used for all objects except dxgadapter -+ * The handle table lock order is higher than the local_handle_table -+ * lock -+ */ -+ struct hmgrtable handle_table; -+ /* -+ * This handle table is used for dxgadapter objects. -+ * The handle table lock order is lowest. -+ */ -+ struct hmgrtable local_handle_table; -+ /* Handle of the corresponding objec on the host */ -+ struct d3dkmthandle host_handle; -+ -+ /* List of opened adapters (dxgprocess_adapter) */ -+ struct list_head process_adapter_list_head; - }; - -+struct dxgprocess *dxgprocess_create(void); -+void dxgprocess_destroy(struct dxgprocess *process); -+void dxgprocess_release(struct kref *refcount); -+int dxgprocess_open_adapter(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle *handle); -+int dxgprocess_close_adapter(struct dxgprocess *process, -+ struct d3dkmthandle handle); -+struct dxgadapter *dxgprocess_get_adapter(struct dxgprocess *process, -+ struct d3dkmthandle handle); -+struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process, -+ struct d3dkmthandle handle); -+void dxgprocess_ht_lock_shared_down(struct dxgprocess *process); -+void dxgprocess_ht_lock_shared_up(struct dxgprocess *process); -+void dxgprocess_ht_lock_exclusive_down(struct dxgprocess *process); -+void dxgprocess_ht_lock_exclusive_up(struct dxgprocess *process); -+struct dxgprocess_adapter *dxgprocess_get_adapter_info(struct dxgprocess -+ *process, -+ struct dxgadapter -+ *adapter); -+ - enum dxgadapter_state { - DXGADAPTER_STATE_ACTIVE = 0, - DXGADAPTER_STATE_STOPPED = 1, -@@ -168,6 +248,8 @@ struct dxgadapter { - struct kref adapter_kref; - /* Entry in the list of adapters in dxgglobal */ - struct list_head adapter_list_entry; -+ /* The list of dxgprocess_adapter entries */ -+ struct list_head 
adapter_process_list_head; - struct pci_dev *pci_dev; - struct hv_device *hv_dev; - struct dxgvmbuschannel channel; -@@ -191,6 +273,12 @@ void dxgadapter_release_lock_shared(struct dxgadapter *adapter); - int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter); - void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter); - void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter); -+void dxgadapter_add_process(struct dxgadapter *adapter, -+ struct dxgprocess_adapter *process_info); -+void dxgadapter_remove_process(struct dxgprocess_adapter *process_info); -+ -+long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2); -+long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2); - - /* - * The convention is that VNBus instance id is a GUID, but the host sets -@@ -220,9 +308,14 @@ static inline void guid_to_luid(guid_t *guid, struct winluid *luid) - - void dxgvmb_initialize(void); - int dxgvmb_send_set_iospace_region(u64 start, u64 len); -+int dxgvmb_send_create_process(struct dxgprocess *process); -+int dxgvmb_send_destroy_process(struct d3dkmthandle process); - int dxgvmb_send_open_adapter(struct dxgadapter *adapter); - int dxgvmb_send_close_adapter(struct dxgadapter *adapter); - int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter); -+int dxgvmb_send_query_adapter_info(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryadapterinfo *args); - int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel, - void *command, - u32 cmd_size); -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -123,6 +123,20 @@ static struct dxgadapter *find_adapter(struct winluid *luid) - return adapter; - } - -+void dxgglobal_acquire_process_adapter_lock(void) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ 
mutex_lock(&dxgglobal->process_adapter_mutex); -+} -+ -+void dxgglobal_release_process_adapter_lock(void) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ mutex_unlock(&dxgglobal->process_adapter_mutex); -+} -+ - /* - * Creates a new dxgadapter object, which represents a virtual GPU, projected - * by the host. -@@ -147,6 +161,7 @@ int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, - kref_init(&adapter->adapter_kref); - init_rwsem(&adapter->core_lock); - -+ INIT_LIST_HEAD(&adapter->adapter_process_list_head); - adapter->pci_dev = dev; - guid_to_luid(guid, &adapter->luid); - -@@ -205,8 +220,87 @@ static void dxgglobal_stop_adapters(void) - dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); - } - -+/* -+ * Returns dxgprocess for the current executing process. -+ * Creates dxgprocess if it doesn't exist. -+ */ -+static struct dxgprocess *dxgglobal_get_current_process(void) -+{ -+ /* -+ * Find the DXG process for the current process. -+ * A new process is created if necessary. 
-+ */ -+ struct dxgprocess *process = NULL; -+ struct dxgprocess *entry = NULL; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ mutex_lock(&dxgglobal->plistmutex); -+ list_for_each_entry(entry, &dxgglobal->plisthead, plistentry) { -+ /* All threads of a process have the same thread group ID */ -+ if (entry->tgid == current->tgid) { -+ if (kref_get_unless_zero(&entry->process_kref)) { -+ process = entry; -+ DXG_TRACE("found dxgprocess"); -+ } else { -+ DXG_TRACE("process is destroyed"); -+ } -+ break; -+ } -+ } -+ mutex_unlock(&dxgglobal->plistmutex); -+ -+ if (process == NULL) -+ process = dxgprocess_create(); -+ -+ return process; -+} -+ -+/* -+ * File operations for the /dev/dxg device -+ */ -+ -+static int dxgk_open(struct inode *n, struct file *f) -+{ -+ int ret = 0; -+ struct dxgprocess *process; -+ -+ DXG_TRACE("%p %d %d", f, current->pid, current->tgid); -+ -+ /* Find/create a dxgprocess structure for this process */ -+ process = dxgglobal_get_current_process(); -+ -+ if (process) { -+ f->private_data = process; -+ } else { -+ DXG_TRACE("cannot create dxgprocess"); -+ ret = -EBADF; -+ } -+ -+ return ret; -+} -+ -+static int dxgk_release(struct inode *n, struct file *f) -+{ -+ struct dxgprocess *process; -+ -+ process = (struct dxgprocess *)f->private_data; -+ DXG_TRACE("%p, %p", f, process); -+ -+ if (process == NULL) -+ return -EINVAL; -+ -+ kref_put(&process->process_kref, dxgprocess_release); -+ -+ f->private_data = NULL; -+ return 0; -+} -+ - const struct file_operations dxgk_fops = { - .owner = THIS_MODULE, -+ .open = dxgk_open, -+ .release = dxgk_release, -+ .compat_ioctl = dxgk_compat_ioctl, -+ .unlocked_ioctl = dxgk_unlocked_ioctl, - }; - - /* -@@ -616,7 +710,10 @@ static struct dxgglobal *dxgglobal_create(void) - if (!dxgglobal) - return NULL; - -+ INIT_LIST_HEAD(&dxgglobal->plisthead); -+ mutex_init(&dxgglobal->plistmutex); - mutex_init(&dxgglobal->device_mutex); -+ mutex_init(&dxgglobal->process_adapter_mutex); - - 
INIT_LIST_HEAD(&dxgglobal->vgpu_ch_list_head); - INIT_LIST_HEAD(&dxgglobal->adapter_list_head); -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -0,0 +1,262 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * DXGPROCESS implementation -+ * -+ */ -+ -+#include "dxgkrnl.h" -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -+ -+/* -+ * Creates a new dxgprocess object -+ * Must be called when dxgglobal->plistmutex is held -+ */ -+struct dxgprocess *dxgprocess_create(void) -+{ -+ struct dxgprocess *process; -+ int ret; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ process = kzalloc(sizeof(struct dxgprocess), GFP_KERNEL); -+ if (process != NULL) { -+ DXG_TRACE("new dxgprocess created"); -+ process->pid = current->pid; -+ process->tgid = current->tgid; -+ ret = dxgvmb_send_create_process(process); -+ if (ret < 0) { -+ DXG_TRACE("send_create_process failed"); -+ kfree(process); -+ process = NULL; -+ } else { -+ INIT_LIST_HEAD(&process->plistentry); -+ kref_init(&process->process_kref); -+ -+ mutex_lock(&dxgglobal->plistmutex); -+ list_add_tail(&process->plistentry, -+ &dxgglobal->plisthead); -+ mutex_unlock(&dxgglobal->plistmutex); -+ -+ hmgrtable_init(&process->handle_table, process); -+ hmgrtable_init(&process->local_handle_table, process); -+ INIT_LIST_HEAD(&process->process_adapter_list_head); -+ } -+ } -+ return process; -+} -+ -+void dxgprocess_destroy(struct dxgprocess *process) -+{ -+ int i; -+ enum hmgrentry_type t; -+ struct d3dkmthandle h; -+ void *o; -+ struct dxgprocess_adapter *entry; -+ struct dxgprocess_adapter *tmp; -+ -+ /* Destroy all adapter state */ -+ dxgglobal_acquire_process_adapter_lock(); -+ list_for_each_entry_safe(entry, tmp, -+ &process->process_adapter_list_head, -+ 
process_adapter_list_entry) { -+ dxgprocess_adapter_destroy(entry); -+ } -+ dxgglobal_release_process_adapter_lock(); -+ -+ i = 0; -+ while (hmgrtable_next_entry(&process->local_handle_table, -+ &i, &t, &h, &o)) { -+ switch (t) { -+ case HMGRENTRY_TYPE_DXGADAPTER: -+ dxgprocess_close_adapter(process, h); -+ break; -+ default: -+ DXG_ERR("invalid entry in handle table %d", t); -+ break; -+ } -+ } -+ -+ hmgrtable_destroy(&process->handle_table); -+ hmgrtable_destroy(&process->local_handle_table); -+} -+ -+void dxgprocess_release(struct kref *refcount) -+{ -+ struct dxgprocess *process; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ process = container_of(refcount, struct dxgprocess, process_kref); -+ -+ mutex_lock(&dxgglobal->plistmutex); -+ list_del(&process->plistentry); -+ mutex_unlock(&dxgglobal->plistmutex); -+ -+ dxgprocess_destroy(process); -+ -+ if (process->host_handle.v) -+ dxgvmb_send_destroy_process(process->host_handle); -+ kfree(process); -+} -+ -+struct dxgprocess_adapter *dxgprocess_get_adapter_info(struct dxgprocess -+ *process, -+ struct dxgadapter -+ *adapter) -+{ -+ struct dxgprocess_adapter *entry; -+ -+ list_for_each_entry(entry, &process->process_adapter_list_head, -+ process_adapter_list_entry) { -+ if (adapter == entry->adapter) { -+ DXG_TRACE("Found process info %p", entry); -+ return entry; -+ } -+ } -+ return NULL; -+} -+ -+/* -+ * Dxgprocess takes references on dxgadapter and dxgprocess_adapter. -+ * -+ * The process_adapter lock is held. 
-+ * -+ */ -+int dxgprocess_open_adapter(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle *h) -+{ -+ int ret = 0; -+ struct dxgprocess_adapter *adapter_info; -+ struct d3dkmthandle handle; -+ -+ h->v = 0; -+ adapter_info = dxgprocess_get_adapter_info(process, adapter); -+ if (adapter_info == NULL) { -+ DXG_TRACE("creating new process adapter info"); -+ adapter_info = dxgprocess_adapter_create(process, adapter); -+ if (adapter_info == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ } else { -+ adapter_info->refcount++; -+ } -+ -+ handle = hmgrtable_alloc_handle_safe(&process->local_handle_table, -+ adapter, HMGRENTRY_TYPE_DXGADAPTER, -+ true); -+ if (handle.v) { -+ *h = handle; -+ } else { -+ DXG_ERR("failed to create adapter handle"); -+ ret = -ENOMEM; -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (adapter_info) -+ dxgprocess_adapter_release(adapter_info); -+ } -+ -+ return ret; -+} -+ -+int dxgprocess_close_adapter(struct dxgprocess *process, -+ struct d3dkmthandle handle) -+{ -+ struct dxgadapter *adapter; -+ struct dxgprocess_adapter *adapter_info; -+ int ret = 0; -+ -+ if (handle.v == 0) -+ return 0; -+ -+ hmgrtable_lock(&process->local_handle_table, DXGLOCK_EXCL); -+ adapter = dxgprocess_get_adapter(process, handle); -+ if (adapter) -+ hmgrtable_free_handle(&process->local_handle_table, -+ HMGRENTRY_TYPE_DXGADAPTER, handle); -+ hmgrtable_unlock(&process->local_handle_table, DXGLOCK_EXCL); -+ -+ if (adapter) { -+ adapter_info = dxgprocess_get_adapter_info(process, adapter); -+ if (adapter_info) { -+ dxgglobal_acquire_process_adapter_lock(); -+ dxgprocess_adapter_release(adapter_info); -+ dxgglobal_release_process_adapter_lock(); -+ } else { -+ ret = -EINVAL; -+ } -+ } else { -+ DXG_ERR("Adapter not found %x", handle.v); -+ ret = -EINVAL; -+ } -+ -+ return ret; -+} -+ -+struct dxgadapter *dxgprocess_get_adapter(struct dxgprocess *process, -+ struct d3dkmthandle handle) -+{ -+ struct dxgadapter *adapter; -+ -+ adapter = 
hmgrtable_get_object_by_type(&process->local_handle_table, -+ HMGRENTRY_TYPE_DXGADAPTER, -+ handle); -+ if (adapter == NULL) -+ DXG_ERR("Adapter not found %x", handle.v); -+ return adapter; -+} -+ -+/* -+ * Gets the adapter object from the process handle table. -+ * The adapter object is referenced. -+ * The function acquired the handle table lock shared. -+ */ -+struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process, -+ struct d3dkmthandle handle) -+{ -+ struct dxgadapter *adapter; -+ -+ hmgrtable_lock(&process->local_handle_table, DXGLOCK_SHARED); -+ adapter = hmgrtable_get_object_by_type(&process->local_handle_table, -+ HMGRENTRY_TYPE_DXGADAPTER, -+ handle); -+ if (adapter == NULL) -+ DXG_ERR("adapter_by_handle failed %x", handle.v); -+ else if (kref_get_unless_zero(&adapter->adapter_kref) == 0) { -+ DXG_ERR("failed to acquire adapter reference"); -+ adapter = NULL; -+ } -+ hmgrtable_unlock(&process->local_handle_table, DXGLOCK_SHARED); -+ return adapter; -+} -+ -+void dxgprocess_ht_lock_shared_down(struct dxgprocess *process) -+{ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -+} -+ -+void dxgprocess_ht_lock_shared_up(struct dxgprocess *process) -+{ -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED); -+} -+ -+void dxgprocess_ht_lock_exclusive_down(struct dxgprocess *process) -+{ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+} -+ -+void dxgprocess_ht_lock_exclusive_up(struct dxgprocess *process) -+{ -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+} -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -497,6 +497,87 @@ int dxgvmb_send_set_iospace_region(u64 start, u64 len) - return ret; - } - -+int dxgvmb_send_create_process(struct dxgprocess *process) -+{ -+ int ret; -+ struct dxgkvmb_command_createprocess *command; -+ struct 
dxgkvmb_command_createprocess_return result = { 0 }; -+ struct dxgvmbusmsg msg; -+ char s[WIN_MAX_PATH]; -+ int i; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, NULL, process, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ command_vm_to_host_init1(&command->hdr, DXGK_VMBCOMMAND_CREATEPROCESS); -+ command->process = process; -+ command->process_id = process->pid; -+ command->linux_process = 1; -+ s[0] = 0; -+ __get_task_comm(s, WIN_MAX_PATH, current); -+ for (i = 0; i < WIN_MAX_PATH; i++) { -+ command->process_name[i] = s[i]; -+ if (s[i] == 0) -+ break; -+ } -+ -+ ret = dxgvmb_send_sync_msg(&dxgglobal->channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) { -+ DXG_ERR("create_process failed %d", ret); -+ } else if (result.hprocess.v == 0) { -+ DXG_ERR("create_process returned 0 handle"); -+ ret = -ENOTRECOVERABLE; -+ } else { -+ process->host_handle = result.hprocess; -+ DXG_TRACE("create_process returned %x", -+ process->host_handle.v); -+ } -+ -+ dxgglobal_release_channel_lock(); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_destroy_process(struct d3dkmthandle process) -+{ -+ int ret; -+ struct dxgkvmb_command_destroyprocess *command; -+ struct dxgvmbusmsg msg; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, NULL, NULL, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ command_vm_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_DESTROYPROCESS, -+ process); -+ ret = dxgvmb_send_sync_msg_ntstatus(&dxgglobal->channel, -+ msg.hdr, msg.size); -+ dxgglobal_release_channel_lock(); -+ -+cleanup: -+ free_message(&msg, NULL); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - /* - * Virtual 
GPU messages to the host - */ -@@ -591,3 +672,86 @@ int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter) - DXG_ERR("Failed to get adapter info: %d", ret); - return ret; - } -+ -+int dxgvmb_send_query_adapter_info(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryadapterinfo *args) -+{ -+ struct dxgkvmb_command_queryadapterinfo *command; -+ u32 cmd_size = sizeof(*command) + args->private_data_size - 1; -+ int ret; -+ u32 private_data_size; -+ void *private_data; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ ret = copy_from_user(command->private_data, -+ args->private_data, args->private_data_size); -+ if (ret) { -+ DXG_ERR("Faled to copy private data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_QUERYADAPTERINFO, -+ process->host_handle); -+ command->private_data_size = args->private_data_size; -+ command->query_type = args->type; -+ -+ if (dxgglobal->vmbus_ver >= DXGK_VMBUS_INTERFACE_VERSION) { -+ private_data = msg.msg; -+ private_data_size = command->private_data_size + -+ sizeof(struct ntstatus); -+ } else { -+ private_data = command->private_data; -+ private_data_size = command->private_data_size; -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ private_data, private_data_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ if (dxgglobal->vmbus_ver >= DXGK_VMBUS_INTERFACE_VERSION) { -+ ret = ntstatus2int(*(struct ntstatus *)private_data); -+ if (ret < 0) -+ goto cleanup; -+ private_data = (char *)private_data + sizeof(struct ntstatus); -+ } -+ -+ switch (args->type) { -+ case _KMTQAITYPE_ADAPTERTYPE: -+ case _KMTQAITYPE_ADAPTERTYPE_RENDER: -+ { -+ struct d3dkmt_adaptertype *adapter_type = -+ (void *)private_data; -+ adapter_type->paravirtualized = 1; -+ 
adapter_type->display_supported = 0; -+ adapter_type->post_device = 0; -+ adapter_type->indirect_display_device = 0; -+ adapter_type->acg_supported = 0; -+ adapter_type->support_set_timings_from_vidpn = 0; -+ break; -+ } -+ default: -+ break; -+ } -+ ret = copy_to_user(args->private_data, private_data, -+ args->private_data_size); -+ if (ret) { -+ DXG_ERR("Faled to copy private data to user"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -14,7 +14,11 @@ - #ifndef _DXGVMBUS_H - #define _DXGVMBUS_H - -+struct dxgprocess; -+struct dxgadapter; -+ - #define DXG_MAX_VM_BUS_PACKET_SIZE (1024 * 128) -+#define DXG_VM_PROCESS_NAME_LENGTH 260 - - enum dxgkvmb_commandchanneltype { - DXGKVMB_VGPU_TO_HOST, -@@ -169,6 +173,26 @@ struct dxgkvmb_command_setiospaceregion { - u32 shared_page_gpadl; - }; - -+struct dxgkvmb_command_createprocess { -+ struct dxgkvmb_command_vm_to_host hdr; -+ void *process; -+ u64 process_id; -+ u16 process_name[DXG_VM_PROCESS_NAME_LENGTH + 1]; -+ u8 csrss_process:1; -+ u8 dwm_process:1; -+ u8 wow64_process:1; -+ u8 linux_process:1; -+}; -+ -+struct dxgkvmb_command_createprocess_return { -+ struct d3dkmthandle hprocess; -+}; -+ -+// The command returns ntstatus -+struct dxgkvmb_command_destroyprocess { -+ struct dxgkvmb_command_vm_to_host hdr; -+}; -+ - struct dxgkvmb_command_openadapter { - struct dxgkvmb_command_vgpu_to_host hdr; - u32 vmbus_interface_version; -@@ -211,4 +235,16 @@ struct dxgkvmb_command_getinternaladapterinfo_return { - struct winluid host_vgpu_luid; - }; - -+struct dxgkvmb_command_queryadapterinfo { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ enum kmtqueryadapterinfotype query_type; -+ u32 private_data_size; -+ u8 private_data[1]; -+}; -+ -+struct 
dxgkvmb_command_queryadapterinfo_return { -+ struct ntstatus status; -+ u8 private_data[1]; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/hmgr.c b/drivers/hv/dxgkrnl/hmgr.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/hmgr.c -@@ -0,0 +1,563 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Handle manager implementation -+ * -+ */ -+ -+#include -+#include -+#include -+ -+#include "misc.h" -+#include "dxgkrnl.h" -+#include "hmgr.h" -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -+ -+const struct d3dkmthandle zerohandle; -+ -+/* -+ * Handle parameters -+ */ -+#define HMGRHANDLE_INSTANCE_BITS 6 -+#define HMGRHANDLE_INDEX_BITS 24 -+#define HMGRHANDLE_UNIQUE_BITS 2 -+ -+#define HMGRHANDLE_INSTANCE_SHIFT 0 -+#define HMGRHANDLE_INDEX_SHIFT \ -+ (HMGRHANDLE_INSTANCE_BITS + HMGRHANDLE_INSTANCE_SHIFT) -+#define HMGRHANDLE_UNIQUE_SHIFT \ -+ (HMGRHANDLE_INDEX_BITS + HMGRHANDLE_INDEX_SHIFT) -+ -+#define HMGRHANDLE_INSTANCE_MASK \ -+ (((1 << HMGRHANDLE_INSTANCE_BITS) - 1) << HMGRHANDLE_INSTANCE_SHIFT) -+#define HMGRHANDLE_INDEX_MASK \ -+ (((1 << HMGRHANDLE_INDEX_BITS) - 1) << HMGRHANDLE_INDEX_SHIFT) -+#define HMGRHANDLE_UNIQUE_MASK \ -+ (((1 << HMGRHANDLE_UNIQUE_BITS) - 1) << HMGRHANDLE_UNIQUE_SHIFT) -+ -+#define HMGRHANDLE_INSTANCE_MAX ((1 << HMGRHANDLE_INSTANCE_BITS) - 1) -+#define HMGRHANDLE_INDEX_MAX ((1 << HMGRHANDLE_INDEX_BITS) - 1) -+#define HMGRHANDLE_UNIQUE_MAX ((1 << HMGRHANDLE_UNIQUE_BITS) - 1) -+ -+/* -+ * Handle entry -+ */ -+struct hmgrentry { -+ union { -+ void *object; -+ struct { -+ u32 prev_free_index; -+ u32 next_free_index; -+ }; -+ }; -+ u32 type:HMGRENTRY_TYPE_BITS + 1; -+ u32 unique:HMGRHANDLE_UNIQUE_BITS; -+ u32 instance:HMGRHANDLE_INSTANCE_BITS; -+ u32 destroyed:1; -+}; -+ -+#define HMGRTABLE_SIZE_INCREMENT 1024 -+#define HMGRTABLE_MIN_FREE_ENTRIES 128 
-+#define HMGRTABLE_INVALID_INDEX (~((1 << HMGRHANDLE_INDEX_BITS) - 1)) -+#define HMGRTABLE_SIZE_MAX 0xFFFFFFF -+ -+static u32 table_size_increment = HMGRTABLE_SIZE_INCREMENT; -+ -+static u32 get_unique(struct d3dkmthandle h) -+{ -+ return (h.v & HMGRHANDLE_UNIQUE_MASK) >> HMGRHANDLE_UNIQUE_SHIFT; -+} -+ -+static u32 get_index(struct d3dkmthandle h) -+{ -+ return (h.v & HMGRHANDLE_INDEX_MASK) >> HMGRHANDLE_INDEX_SHIFT; -+} -+ -+static bool is_handle_valid(struct hmgrtable *table, struct d3dkmthandle h, -+ bool ignore_destroyed, enum hmgrentry_type t) -+{ -+ u32 index = get_index(h); -+ u32 unique = get_unique(h); -+ struct hmgrentry *entry; -+ -+ if (index >= table->table_size) { -+ DXG_ERR("Invalid index %x %d", h.v, index); -+ return false; -+ } -+ -+ entry = &table->entry_table[index]; -+ if (unique != entry->unique) { -+ DXG_ERR("Invalid unique %x %d %d %d %p", -+ h.v, unique, entry->unique, index, entry->object); -+ return false; -+ } -+ -+ if (entry->destroyed && !ignore_destroyed) { -+ DXG_ERR("Invalid destroyed value"); -+ return false; -+ } -+ -+ if (entry->type == HMGRENTRY_TYPE_FREE) { -+ DXG_ERR("Entry is freed %x %d", h.v, index); -+ return false; -+ } -+ -+ if (t != HMGRENTRY_TYPE_FREE && t != entry->type) { -+ DXG_ERR("type mismatch %x %d %d", h.v, t, entry->type); -+ return false; -+ } -+ -+ return true; -+} -+ -+static struct d3dkmthandle build_handle(u32 index, u32 unique, u32 instance) -+{ -+ struct d3dkmthandle handle; -+ -+ handle.v = (index << HMGRHANDLE_INDEX_SHIFT) & HMGRHANDLE_INDEX_MASK; -+ handle.v |= (unique << HMGRHANDLE_UNIQUE_SHIFT) & -+ HMGRHANDLE_UNIQUE_MASK; -+ handle.v |= (instance << HMGRHANDLE_INSTANCE_SHIFT) & -+ HMGRHANDLE_INSTANCE_MASK; -+ -+ return handle; -+} -+ -+inline u32 hmgrtable_get_used_entry_count(struct hmgrtable *table) -+{ -+ DXGKRNL_ASSERT(table->table_size >= table->free_count); -+ return (table->table_size - table->free_count); -+} -+ -+bool hmgrtable_mark_destroyed(struct hmgrtable *table, struct d3dkmthandle 
h) -+{ -+ if (!is_handle_valid(table, h, false, HMGRENTRY_TYPE_FREE)) -+ return false; -+ -+ table->entry_table[get_index(h)].destroyed = true; -+ return true; -+} -+ -+bool hmgrtable_unmark_destroyed(struct hmgrtable *table, struct d3dkmthandle h) -+{ -+ if (!is_handle_valid(table, h, true, HMGRENTRY_TYPE_FREE)) -+ return true; -+ -+ DXGKRNL_ASSERT(table->entry_table[get_index(h)].destroyed); -+ table->entry_table[get_index(h)].destroyed = 0; -+ return true; -+} -+ -+static bool expand_table(struct hmgrtable *table, u32 NumEntries) -+{ -+ u32 new_table_size; -+ struct hmgrentry *new_entry; -+ u32 table_index; -+ u32 new_free_count; -+ u32 prev_free_index; -+ u32 tail_index = table->free_handle_list_tail; -+ -+ /* The tail should point to the last free element in the list */ -+ if (table->free_count != 0) { -+ if (tail_index >= table->table_size || -+ table->entry_table[tail_index].next_free_index != -+ HMGRTABLE_INVALID_INDEX) { -+ DXG_ERR("corruption"); -+ DXG_ERR("tail_index: %x", tail_index); -+ DXG_ERR("table size: %x", table->table_size); -+ DXG_ERR("free_count: %d", table->free_count); -+ DXG_ERR("NumEntries: %x", NumEntries); -+ return false; -+ } -+ } -+ -+ new_free_count = table_size_increment + table->free_count; -+ new_table_size = table->table_size + table_size_increment; -+ if (new_table_size < NumEntries) { -+ new_free_count += NumEntries - new_table_size; -+ new_table_size = NumEntries; -+ } -+ -+ if (new_table_size > HMGRHANDLE_INDEX_MAX) { -+ DXG_ERR("Invalid new table size"); -+ return false; -+ } -+ -+ new_entry = (struct hmgrentry *) -+ vzalloc(new_table_size * sizeof(struct hmgrentry)); -+ if (new_entry == NULL) { -+ DXG_ERR("allocation failed"); -+ return false; -+ } -+ -+ if (table->entry_table) { -+ memcpy(new_entry, table->entry_table, -+ table->table_size * sizeof(struct hmgrentry)); -+ vfree(table->entry_table); -+ } else { -+ table->free_handle_list_head = 0; -+ } -+ -+ table->entry_table = new_entry; -+ -+ /* Initialize new table 
entries and add to the free list */ -+ table_index = table->table_size; -+ -+ prev_free_index = table->free_handle_list_tail; -+ -+ while (table_index < new_table_size) { -+ struct hmgrentry *entry = &table->entry_table[table_index]; -+ -+ entry->prev_free_index = prev_free_index; -+ entry->next_free_index = table_index + 1; -+ entry->type = HMGRENTRY_TYPE_FREE; -+ entry->unique = 1; -+ entry->instance = 0; -+ prev_free_index = table_index; -+ -+ table_index++; -+ } -+ -+ table->entry_table[table_index - 1].next_free_index = -+ (u32) HMGRTABLE_INVALID_INDEX; -+ -+ if (table->free_count != 0) { -+ /* Link the current free list with the new entries */ -+ struct hmgrentry *entry; -+ -+ entry = &table->entry_table[table->free_handle_list_tail]; -+ entry->next_free_index = table->table_size; -+ } -+ table->free_handle_list_tail = new_table_size - 1; -+ if (table->free_handle_list_head == HMGRTABLE_INVALID_INDEX) -+ table->free_handle_list_head = table->table_size; -+ -+ table->table_size = new_table_size; -+ table->free_count = new_free_count; -+ -+ return true; -+} -+ -+void hmgrtable_init(struct hmgrtable *table, struct dxgprocess *process) -+{ -+ table->process = process; -+ table->entry_table = NULL; -+ table->table_size = 0; -+ table->free_handle_list_head = HMGRTABLE_INVALID_INDEX; -+ table->free_handle_list_tail = HMGRTABLE_INVALID_INDEX; -+ table->free_count = 0; -+ init_rwsem(&table->table_lock); -+} -+ -+void hmgrtable_destroy(struct hmgrtable *table) -+{ -+ if (table->entry_table) { -+ vfree(table->entry_table); -+ table->entry_table = NULL; -+ } -+} -+ -+void hmgrtable_lock(struct hmgrtable *table, enum dxglockstate state) -+{ -+ if (state == DXGLOCK_EXCL) -+ down_write(&table->table_lock); -+ else -+ down_read(&table->table_lock); -+} -+ -+void hmgrtable_unlock(struct hmgrtable *table, enum dxglockstate state) -+{ -+ if (state == DXGLOCK_EXCL) -+ up_write(&table->table_lock); -+ else -+ up_read(&table->table_lock); -+} -+ -+struct d3dkmthandle 
hmgrtable_alloc_handle(struct hmgrtable *table, -+ void *object, -+ enum hmgrentry_type type, -+ bool make_valid) -+{ -+ u32 index; -+ struct hmgrentry *entry; -+ u32 unique; -+ -+ DXGKRNL_ASSERT(type <= HMGRENTRY_TYPE_LIMIT); -+ DXGKRNL_ASSERT(type > HMGRENTRY_TYPE_FREE); -+ -+ if (table->free_count <= HMGRTABLE_MIN_FREE_ENTRIES) { -+ if (!expand_table(table, 0)) { -+ DXG_ERR("hmgrtable expand_table failed"); -+ return zerohandle; -+ } -+ } -+ -+ if (table->free_handle_list_head >= table->table_size) { -+ DXG_ERR("hmgrtable corrupted handle table head"); -+ return zerohandle; -+ } -+ -+ index = table->free_handle_list_head; -+ entry = &table->entry_table[index]; -+ -+ if (entry->type != HMGRENTRY_TYPE_FREE) { -+ DXG_ERR("hmgrtable expected free handle"); -+ return zerohandle; -+ } -+ -+ table->free_handle_list_head = entry->next_free_index; -+ -+ if (entry->next_free_index != table->free_handle_list_tail) { -+ if (entry->next_free_index >= table->table_size) { -+ DXG_ERR("hmgrtable invalid next free index"); -+ return zerohandle; -+ } -+ table->entry_table[entry->next_free_index].prev_free_index = -+ HMGRTABLE_INVALID_INDEX; -+ } -+ -+ unique = table->entry_table[index].unique; -+ -+ table->entry_table[index].object = object; -+ table->entry_table[index].type = type; -+ table->entry_table[index].instance = 0; -+ table->entry_table[index].destroyed = !make_valid; -+ table->free_count--; -+ DXGKRNL_ASSERT(table->free_count <= table->table_size); -+ -+ return build_handle(index, unique, table->entry_table[index].instance); -+} -+ -+int hmgrtable_assign_handle_safe(struct hmgrtable *table, -+ void *object, -+ enum hmgrentry_type type, -+ struct d3dkmthandle h) -+{ -+ int ret; -+ -+ hmgrtable_lock(table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(table, object, type, h); -+ hmgrtable_unlock(table, DXGLOCK_EXCL); -+ return ret; -+} -+ -+int hmgrtable_assign_handle(struct hmgrtable *table, void *object, -+ enum hmgrentry_type type, struct d3dkmthandle h) -+{ -+ u32 
index = get_index(h); -+ u32 unique = get_unique(h); -+ struct hmgrentry *entry = NULL; -+ -+ DXG_TRACE("%x, %d %p, %p", h.v, index, object, table); -+ -+ if (index >= HMGRHANDLE_INDEX_MAX) { -+ DXG_ERR("handle index is too big: %x %d", h.v, index); -+ return -EINVAL; -+ } -+ -+ if (index >= table->table_size) { -+ u32 new_size = index + table_size_increment; -+ -+ if (new_size > HMGRHANDLE_INDEX_MAX) -+ new_size = HMGRHANDLE_INDEX_MAX; -+ if (!expand_table(table, new_size)) { -+ DXG_ERR("failed to expand handle table %d", -+ new_size); -+ return -ENOMEM; -+ } -+ } -+ -+ entry = &table->entry_table[index]; -+ -+ if (entry->type != HMGRENTRY_TYPE_FREE) { -+ DXG_ERR("the entry is not free: %d %x", entry->type, -+ hmgrtable_build_entry_handle(table, index).v); -+ return -EINVAL; -+ } -+ -+ if (index != table->free_handle_list_tail) { -+ if (entry->next_free_index >= table->table_size) { -+ DXG_ERR("hmgr: invalid next free index %d", -+ entry->next_free_index); -+ return -EINVAL; -+ } -+ table->entry_table[entry->next_free_index].prev_free_index = -+ entry->prev_free_index; -+ } else { -+ table->free_handle_list_tail = entry->prev_free_index; -+ } -+ -+ if (index != table->free_handle_list_head) { -+ if (entry->prev_free_index >= table->table_size) { -+ DXG_ERR("hmgr: invalid next prev index %d", -+ entry->prev_free_index); -+ return -EINVAL; -+ } -+ table->entry_table[entry->prev_free_index].next_free_index = -+ entry->next_free_index; -+ } else { -+ table->free_handle_list_head = entry->next_free_index; -+ } -+ -+ entry->prev_free_index = HMGRTABLE_INVALID_INDEX; -+ entry->next_free_index = HMGRTABLE_INVALID_INDEX; -+ entry->object = object; -+ entry->type = type; -+ entry->instance = 0; -+ entry->unique = unique; -+ entry->destroyed = false; -+ -+ table->free_count--; -+ DXGKRNL_ASSERT(table->free_count <= table->table_size); -+ return 0; -+} -+ -+struct d3dkmthandle hmgrtable_alloc_handle_safe(struct hmgrtable *table, -+ void *obj, -+ enum hmgrentry_type type, -+ 
bool make_valid) -+{ -+ struct d3dkmthandle h; -+ -+ hmgrtable_lock(table, DXGLOCK_EXCL); -+ h = hmgrtable_alloc_handle(table, obj, type, make_valid); -+ hmgrtable_unlock(table, DXGLOCK_EXCL); -+ return h; -+} -+ -+void hmgrtable_free_handle(struct hmgrtable *table, enum hmgrentry_type t, -+ struct d3dkmthandle h) -+{ -+ struct hmgrentry *entry; -+ u32 i = get_index(h); -+ -+ DXG_TRACE("%p %x", table, h.v); -+ -+ /* Ignore the destroyed flag when checking the handle */ -+ if (is_handle_valid(table, h, true, t)) { -+ DXGKRNL_ASSERT(table->free_count < table->table_size); -+ entry = &table->entry_table[i]; -+ entry->unique = 1; -+ entry->type = HMGRENTRY_TYPE_FREE; -+ entry->destroyed = 0; -+ if (entry->unique != HMGRHANDLE_UNIQUE_MAX) -+ entry->unique += 1; -+ else -+ entry->unique = 1; -+ -+ table->free_count++; -+ DXGKRNL_ASSERT(table->free_count <= table->table_size); -+ -+ /* -+ * Insert the index to the free list at the tail. -+ */ -+ entry->next_free_index = HMGRTABLE_INVALID_INDEX; -+ entry->prev_free_index = table->free_handle_list_tail; -+ entry = &table->entry_table[table->free_handle_list_tail]; -+ entry->next_free_index = i; -+ table->free_handle_list_tail = i; -+ } else { -+ DXG_ERR("Invalid handle to free: %d %x", i, h.v); -+ } -+} -+ -+void hmgrtable_free_handle_safe(struct hmgrtable *table, enum hmgrentry_type t, -+ struct d3dkmthandle h) -+{ -+ hmgrtable_lock(table, DXGLOCK_EXCL); -+ hmgrtable_free_handle(table, t, h); -+ hmgrtable_unlock(table, DXGLOCK_EXCL); -+} -+ -+struct d3dkmthandle hmgrtable_build_entry_handle(struct hmgrtable *table, -+ u32 index) -+{ -+ DXGKRNL_ASSERT(index < table->table_size); -+ -+ return build_handle(index, table->entry_table[index].unique, -+ table->entry_table[index].instance); -+} -+ -+void *hmgrtable_get_object(struct hmgrtable *table, struct d3dkmthandle h) -+{ -+ if (!is_handle_valid(table, h, false, HMGRENTRY_TYPE_FREE)) -+ return NULL; -+ -+ return table->entry_table[get_index(h)].object; -+} -+ -+void 
*hmgrtable_get_object_by_type(struct hmgrtable *table, -+ enum hmgrentry_type type, -+ struct d3dkmthandle h) -+{ -+ if (!is_handle_valid(table, h, false, type)) { -+ DXG_ERR("Invalid handle %x", h.v); -+ return NULL; -+ } -+ return table->entry_table[get_index(h)].object; -+} -+ -+void *hmgrtable_get_entry_object(struct hmgrtable *table, u32 index) -+{ -+ DXGKRNL_ASSERT(index < table->table_size); -+ DXGKRNL_ASSERT(table->entry_table[index].type != HMGRENTRY_TYPE_FREE); -+ -+ return table->entry_table[index].object; -+} -+ -+static enum hmgrentry_type hmgrtable_get_entry_type(struct hmgrtable *table, -+ u32 index) -+{ -+ DXGKRNL_ASSERT(index < table->table_size); -+ return (enum hmgrentry_type)table->entry_table[index].type; -+} -+ -+enum hmgrentry_type hmgrtable_get_object_type(struct hmgrtable *table, -+ struct d3dkmthandle h) -+{ -+ if (!is_handle_valid(table, h, false, HMGRENTRY_TYPE_FREE)) -+ return HMGRENTRY_TYPE_FREE; -+ -+ return hmgrtable_get_entry_type(table, get_index(h)); -+} -+ -+void *hmgrtable_get_object_ignore_destroyed(struct hmgrtable *table, -+ struct d3dkmthandle h, -+ enum hmgrentry_type type) -+{ -+ if (!is_handle_valid(table, h, true, type)) -+ return NULL; -+ return table->entry_table[get_index(h)].object; -+} -+ -+bool hmgrtable_next_entry(struct hmgrtable *tbl, -+ u32 *index, -+ enum hmgrentry_type *type, -+ struct d3dkmthandle *handle, -+ void **object) -+{ -+ u32 i; -+ struct hmgrentry *entry; -+ -+ for (i = *index; i < tbl->table_size; i++) { -+ entry = &tbl->entry_table[i]; -+ if (entry->type != HMGRENTRY_TYPE_FREE) { -+ *index = i + 1; -+ *object = entry->object; -+ *handle = build_handle(i, entry->unique, -+ entry->instance); -+ *type = entry->type; -+ return true; -+ } -+ } -+ return false; -+} -diff --git a/drivers/hv/dxgkrnl/hmgr.h b/drivers/hv/dxgkrnl/hmgr.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/hmgr.h -@@ -0,0 +1,112 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+/* 
-+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Handle manager definitions -+ * -+ */ -+ -+#ifndef _HMGR_H_ -+#define _HMGR_H_ -+ -+#include "misc.h" -+ -+struct hmgrentry; -+ -+/* -+ * Handle manager table. -+ * -+ * Implementation notes: -+ * A list of free handles is built on top of the array of table entries. -+ * free_handle_list_head is the index of the first entry in the list. -+ * m_FreeHandleListTail is the index of an entry in the list, which is -+ * HMGRTABLE_MIN_FREE_ENTRIES from the head. It means that when a handle is -+ * freed, the next time the handle can be re-used is after allocating -+ * HMGRTABLE_MIN_FREE_ENTRIES number of handles. -+ * Handles are allocated from the start of the list and free handles are -+ * inserted after the tail of the list. -+ * -+ */ -+struct hmgrtable { -+ struct dxgprocess *process; -+ struct hmgrentry *entry_table; -+ u32 free_handle_list_head; -+ u32 free_handle_list_tail; -+ u32 table_size; -+ u32 free_count; -+ struct rw_semaphore table_lock; -+}; -+ -+/* -+ * Handle entry data types. 
-+ */ -+#define HMGRENTRY_TYPE_BITS 5 -+ -+enum hmgrentry_type { -+ HMGRENTRY_TYPE_FREE = 0, -+ HMGRENTRY_TYPE_DXGADAPTER = 1, -+ HMGRENTRY_TYPE_DXGSHAREDRESOURCE = 2, -+ HMGRENTRY_TYPE_DXGDEVICE = 3, -+ HMGRENTRY_TYPE_DXGRESOURCE = 4, -+ HMGRENTRY_TYPE_DXGALLOCATION = 5, -+ HMGRENTRY_TYPE_DXGOVERLAY = 6, -+ HMGRENTRY_TYPE_DXGCONTEXT = 7, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT = 8, -+ HMGRENTRY_TYPE_DXGKEYEDMUTEX = 9, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE = 10, -+ HMGRENTRY_TYPE_DXGDEVICESYNCOBJECT = 11, -+ HMGRENTRY_TYPE_DXGPROCESS = 12, -+ HMGRENTRY_TYPE_DXGSHAREDVMOBJECT = 13, -+ HMGRENTRY_TYPE_DXGPROTECTEDSESSION = 14, -+ HMGRENTRY_TYPE_DXGHWQUEUE = 15, -+ HMGRENTRY_TYPE_DXGREMOTEBUNDLEOBJECT = 16, -+ HMGRENTRY_TYPE_DXGCOMPOSITIONSURFACEOBJECT = 17, -+ HMGRENTRY_TYPE_DXGCOMPOSITIONSURFACEPROXY = 18, -+ HMGRENTRY_TYPE_DXGTRACKEDWORKLOAD = 19, -+ HMGRENTRY_TYPE_LIMIT = ((1 << HMGRENTRY_TYPE_BITS) - 1), -+ HMGRENTRY_TYPE_MONITOREDFENCE = HMGRENTRY_TYPE_LIMIT + 1, -+}; -+ -+void hmgrtable_init(struct hmgrtable *tbl, struct dxgprocess *process); -+void hmgrtable_destroy(struct hmgrtable *tbl); -+void hmgrtable_lock(struct hmgrtable *tbl, enum dxglockstate state); -+void hmgrtable_unlock(struct hmgrtable *tbl, enum dxglockstate state); -+struct d3dkmthandle hmgrtable_alloc_handle(struct hmgrtable *tbl, void *object, -+ enum hmgrentry_type t, bool make_valid); -+struct d3dkmthandle hmgrtable_alloc_handle_safe(struct hmgrtable *tbl, -+ void *obj, -+ enum hmgrentry_type t, -+ bool reserve); -+int hmgrtable_assign_handle(struct hmgrtable *tbl, void *obj, -+ enum hmgrentry_type, struct d3dkmthandle h); -+int hmgrtable_assign_handle_safe(struct hmgrtable *tbl, void *obj, -+ enum hmgrentry_type t, struct d3dkmthandle h); -+void hmgrtable_free_handle(struct hmgrtable *tbl, enum hmgrentry_type t, -+ struct d3dkmthandle h); -+void hmgrtable_free_handle_safe(struct hmgrtable *tbl, enum hmgrentry_type t, -+ struct d3dkmthandle h); -+struct d3dkmthandle hmgrtable_build_entry_handle(struct 
hmgrtable *tbl, -+ u32 index); -+enum hmgrentry_type hmgrtable_get_object_type(struct hmgrtable *tbl, -+ struct d3dkmthandle h); -+void *hmgrtable_get_object(struct hmgrtable *tbl, struct d3dkmthandle h); -+void *hmgrtable_get_object_by_type(struct hmgrtable *tbl, enum hmgrentry_type t, -+ struct d3dkmthandle h); -+void *hmgrtable_get_object_ignore_destroyed(struct hmgrtable *tbl, -+ struct d3dkmthandle h, -+ enum hmgrentry_type t); -+bool hmgrtable_mark_destroyed(struct hmgrtable *tbl, struct d3dkmthandle h); -+bool hmgrtable_unmark_destroyed(struct hmgrtable *tbl, struct d3dkmthandle h); -+void *hmgrtable_get_entry_object(struct hmgrtable *tbl, u32 index); -+bool hmgrtable_next_entry(struct hmgrtable *tbl, -+ u32 *start_index, -+ enum hmgrentry_type *type, -+ struct d3dkmthandle *handle, -+ void **object); -+ -+#endif -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -22,3 +22,63 @@ - - #undef pr_fmt - #define pr_fmt(fmt) "dxgk: " fmt -+ -+struct ioctl_desc { -+ int (*ioctl_callback)(struct dxgprocess *p, void __user *arg); -+ u32 ioctl; -+ u32 arg_size; -+}; -+ -+static struct ioctl_desc ioctls[] = { -+ -+}; -+ -+/* -+ * IOCTL processing -+ * The driver IOCTLs return -+ * - 0 in case of success -+ * - positive values, which are Windows NTSTATUS (for example, STATUS_PENDING). -+ * Positive values are success codes. 
-+ * - Linux negative error codes -+ */ -+static int dxgk_ioctl(struct file *f, unsigned int p1, unsigned long p2) -+{ -+ int code = _IOC_NR(p1); -+ int status; -+ struct dxgprocess *process; -+ -+ if (code < 1 || code >= ARRAY_SIZE(ioctls)) { -+ DXG_ERR("bad ioctl %x %x %x %x", -+ code, _IOC_TYPE(p1), _IOC_SIZE(p1), _IOC_DIR(p1)); -+ return -ENOTTY; -+ } -+ if (ioctls[code].ioctl_callback == NULL) { -+ DXG_ERR("ioctl callback is NULL %x", code); -+ return -ENOTTY; -+ } -+ if (ioctls[code].ioctl != p1) { -+ DXG_ERR("ioctl mismatch. Code: %x User: %x Kernel: %x", -+ code, p1, ioctls[code].ioctl); -+ return -ENOTTY; -+ } -+ process = (struct dxgprocess *)f->private_data; -+ if (process->tgid != current->tgid) { -+ DXG_ERR("Call from a wrong process: %d %d", -+ process->tgid, current->tgid); -+ return -ENOTTY; -+ } -+ status = ioctls[code].ioctl_callback(process, (void *__user)p2); -+ return status; -+} -+ -+long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2) -+{ -+ DXG_TRACE("compat ioctl %x", p1); -+ return dxgk_ioctl(f, p1, p2); -+} -+ -+long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2) -+{ -+ DXG_TRACE("unlocked ioctl %x Code:%d", p1, _IOC_NR(p1)); -+ return dxgk_ioctl(f, p1, p2); -+} -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -27,10 +27,11 @@ extern const struct d3dkmthandle zerohandle; - * - * channel_lock (VMBus channel lock) - * fd_mutex -- * plistmutex (process list mutex) -- * table_lock (handle table lock) -- * core_lock (dxgadapter lock) -- * device_lock (dxgdevice lock) -+ * plistmutex -+ * table_lock -+ * core_lock -+ * device_lock -+ * process_adapter_mutex - * adapter_list_lock - * device_mutex (dxgglobal mutex) - */ -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ 
b/include/uapi/misc/d3dkmthk.h -@@ -58,4 +58,107 @@ struct winluid { - __u32 b; - }; - -+#define D3DKMT_ADAPTERS_MAX 64 -+ -+struct d3dkmt_adapterinfo { -+ struct d3dkmthandle adapter_handle; -+ struct winluid adapter_luid; -+ __u32 num_sources; -+ __u32 present_move_regions_preferred; -+}; -+ -+struct d3dkmt_enumadapters2 { -+ __u32 num_adapters; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ struct d3dkmt_adapterinfo *adapters; -+#else -+ __u64 *adapters; -+#endif -+}; -+ -+struct d3dkmt_closeadapter { -+ struct d3dkmthandle adapter_handle; -+}; -+ -+struct d3dkmt_openadapterfromluid { -+ struct winluid adapter_luid; -+ struct d3dkmthandle adapter_handle; -+}; -+ -+struct d3dkmt_adaptertype { -+ union { -+ struct { -+ __u32 render_supported:1; -+ __u32 display_supported:1; -+ __u32 software_device:1; -+ __u32 post_device:1; -+ __u32 hybrid_discrete:1; -+ __u32 hybrid_integrated:1; -+ __u32 indirect_display_device:1; -+ __u32 paravirtualized:1; -+ __u32 acg_supported:1; -+ __u32 support_set_timings_from_vidpn:1; -+ __u32 detachable:1; -+ __u32 compute_only:1; -+ __u32 prototype:1; -+ __u32 reserved:19; -+ }; -+ __u32 value; -+ }; -+}; -+ -+enum kmtqueryadapterinfotype { -+ _KMTQAITYPE_UMDRIVERPRIVATE = 0, -+ _KMTQAITYPE_ADAPTERTYPE = 15, -+ _KMTQAITYPE_ADAPTERTYPE_RENDER = 57 -+}; -+ -+struct d3dkmt_queryadapterinfo { -+ struct d3dkmthandle adapter; -+ enum kmtqueryadapterinfotype type; -+#ifdef __KERNEL__ -+ void *private_data; -+#else -+ __u64 private_data; -+#endif -+ __u32 private_data_size; -+}; -+ -+union d3dkmt_enumadapters_filter { -+ struct { -+ __u64 include_compute_only:1; -+ __u64 include_display_only:1; -+ __u64 reserved:62; -+ }; -+ __u64 value; -+}; -+ -+struct d3dkmt_enumadapters3 { -+ union d3dkmt_enumadapters_filter filter; -+ __u32 adapter_count; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ struct d3dkmt_adapterinfo *adapters; -+#else -+ __u64 adapters; -+#endif -+}; -+ -+/* -+ * Dxgkrnl Graphics Port Driver ioctl definitions -+ * -+ */ -+ -+#define 
LX_DXOPENADAPTERFROMLUID \ -+ _IOWR(0x47, 0x01, struct d3dkmt_openadapterfromluid) -+#define LX_DXQUERYADAPTERINFO \ -+ _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) -+#define LX_DXENUMADAPTERS2 \ -+ _IOWR(0x47, 0x14, struct d3dkmt_enumadapters2) -+#define LX_DXCLOSEADAPTER \ -+ _IOWR(0x47, 0x15, struct d3dkmt_closeadapter) -+#define LX_DXENUMADAPTERS3 \ -+ _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) -+ - #endif /* _D3DKMTHK_H */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1673-drivers-hv-dxgkrnl-Enumerate-and-open-dxgadapter-objects.patch b/patch/kernel/archive/wsl2-arm64-6.1/1673-drivers-hv-dxgkrnl-Enumerate-and-open-dxgadapter-objects.patch deleted file mode 100644 index 78e761b42b20..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1673-drivers-hv-dxgkrnl-Enumerate-and-open-dxgadapter-objects.patch +++ /dev/null @@ -1,554 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Mon, 21 Mar 2022 19:18:50 -0700 -Subject: drivers: hv: dxgkrnl: Enumerate and open dxgadapter objects - -Implement ioctls to enumerate dxgadapter objects: - - The LX_DXENUMADAPTERS2 ioctl - - The LX_DXENUMADAPTERS3 ioctl. - -Implement ioctls to open adapter by LUID and to close adapter -handle: - - The LX_DXOPENADAPTERFROMLUID ioctl - - the LX_DXCLOSEADAPTER ioctl - -Impllement the ioctl to query dxgadapter information: - - The LX_DXQUERYADAPTERINFO ioctl - -When a dxgadapter is enumerated, it is implicitely opened and -a handle (d3dkmthandle) is created in the current process handle -table. The handle is returned to the caller and can be used -by user mode to reference the VGPU adapter in other ioctls. - -The caller is responsible to close the adapter when it is not -longer used by sending the LX_DXCLOSEADAPTER ioctl. - -A dxgprocess has a list of opened dxgadapter objects -(dxgprocess_adapter is used to represent the entry in the list). -A dxgadapter also has a list of dxgprocess_adapter objects. 
-This is needed for cleanup because either a process or an adapter -could be destroyed first. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgmodule.c | 3 + - drivers/hv/dxgkrnl/ioctl.c | 482 +++++++++- - 2 files changed, 484 insertions(+), 1 deletion(-) - -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -721,6 +721,9 @@ static struct dxgglobal *dxgglobal_create(void) - - init_rwsem(&dxgglobal->channel_lock); - -+#ifdef DEBUG -+ dxgk_validate_ioctls(); -+#endif - return dxgglobal; - } - -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -29,8 +29,472 @@ struct ioctl_desc { - u32 arg_size; - }; - --static struct ioctl_desc ioctls[] = { -+#ifdef DEBUG -+static char *errorstr(int ret) -+{ -+ return ret < 0 ? 
"err" : ""; -+} -+#endif -+ -+static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_openadapterfromluid args; -+ int ret; -+ struct dxgadapter *entry; -+ struct dxgadapter *adapter = NULL; -+ struct d3dkmt_openadapterfromluid *__user result = inargs; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("Faled to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED); -+ dxgglobal_acquire_process_adapter_lock(); -+ -+ list_for_each_entry(entry, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (dxgadapter_acquire_lock_shared(entry) == 0) { -+ if (*(u64 *) &entry->luid == -+ *(u64 *) &args.adapter_luid) { -+ ret = dxgprocess_open_adapter(process, entry, -+ &args.adapter_handle); -+ -+ if (ret >= 0) { -+ ret = copy_to_user( -+ &result->adapter_handle, -+ &args.adapter_handle, -+ sizeof(struct d3dkmthandle)); -+ if (ret) -+ ret = -EINVAL; -+ } -+ adapter = entry; -+ } -+ dxgadapter_release_lock_shared(entry); -+ if (adapter) -+ break; -+ } -+ } -+ -+ dxgglobal_release_process_adapter_lock(); -+ dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED); -+ -+ if (args.adapter_handle.v == 0) -+ ret = -EINVAL; -+ -+cleanup: -+ -+ if (ret < 0) -+ dxgprocess_close_adapter(process, args.adapter_handle); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkp_enum_adapters(struct dxgprocess *process, -+ union d3dkmt_enumadapters_filter filter, -+ u32 adapter_count_max, -+ struct d3dkmt_adapterinfo *__user info_out, -+ u32 * __user adapter_count_out) -+{ -+ int ret = 0; -+ struct dxgadapter *entry; -+ struct d3dkmt_adapterinfo *info = NULL; -+ struct dxgadapter **adapters = NULL; -+ int adapter_count = 0; -+ int i; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (info_out == NULL || adapter_count_max == 0) { -+ ret = 
copy_to_user(adapter_count_out, -+ &dxgglobal->num_adapters, sizeof(u32)); -+ if (ret) { -+ DXG_ERR("copy_to_user faled"); -+ ret = -EINVAL; -+ } -+ goto cleanup; -+ } -+ -+ if (adapter_count_max > 0xFFFF) { -+ DXG_ERR("too many adapters"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ info = vzalloc(sizeof(struct d3dkmt_adapterinfo) * adapter_count_max); -+ if (info == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ adapters = vzalloc(sizeof(struct dxgadapter *) * adapter_count_max); -+ if (adapters == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED); -+ dxgglobal_acquire_process_adapter_lock(); - -+ list_for_each_entry(entry, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (dxgadapter_acquire_lock_shared(entry) == 0) { -+ struct d3dkmt_adapterinfo *inf = &info[adapter_count]; -+ -+ ret = dxgprocess_open_adapter(process, entry, -+ &inf->adapter_handle); -+ if (ret >= 0) { -+ inf->adapter_luid = entry->luid; -+ adapters[adapter_count] = entry; -+ DXG_TRACE("adapter: %x %x:%x", -+ inf->adapter_handle.v, -+ inf->adapter_luid.b, -+ inf->adapter_luid.a); -+ adapter_count++; -+ } -+ dxgadapter_release_lock_shared(entry); -+ } -+ if (ret < 0) -+ break; -+ } -+ -+ dxgglobal_release_process_adapter_lock(); -+ dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED); -+ -+ if (adapter_count > adapter_count_max) { -+ ret = STATUS_BUFFER_TOO_SMALL; -+ DXG_TRACE("Too many adapters"); -+ ret = copy_to_user(adapter_count_out, -+ &dxgglobal->num_adapters, sizeof(u32)); -+ if (ret) { -+ DXG_ERR("copy_to_user failed"); -+ ret = -EINVAL; -+ } -+ goto cleanup; -+ } -+ -+ ret = copy_to_user(adapter_count_out, &adapter_count, -+ sizeof(adapter_count)); -+ if (ret) { -+ DXG_ERR("failed to copy adapter_count"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_to_user(info_out, info, sizeof(info[0]) * adapter_count); -+ if (ret) { -+ DXG_ERR("failed to copy adapter info"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ 
-+ if (ret >= 0) { -+ DXG_TRACE("found %d adapters", adapter_count); -+ goto success; -+ } -+ if (info) { -+ for (i = 0; i < adapter_count; i++) -+ dxgprocess_close_adapter(process, -+ info[i].adapter_handle); -+ } -+success: -+ if (info) -+ vfree(info); -+ if (adapters) -+ vfree(adapters); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_enum_adapters(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_enumadapters2 args; -+ int ret; -+ struct dxgadapter *entry; -+ struct d3dkmt_adapterinfo *info = NULL; -+ struct dxgadapter **adapters = NULL; -+ int adapter_count = 0; -+ int i; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.adapters == NULL) { -+ DXG_TRACE("buffer is NULL"); -+ args.num_adapters = dxgglobal->num_adapters; -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy args to user"); -+ ret = -EINVAL; -+ } -+ goto cleanup; -+ } -+ if (args.num_adapters < dxgglobal->num_adapters) { -+ args.num_adapters = dxgglobal->num_adapters; -+ DXG_TRACE("buffer is too small"); -+ ret = -EOVERFLOW; -+ goto cleanup; -+ } -+ -+ if (args.num_adapters > D3DKMT_ADAPTERS_MAX) { -+ DXG_TRACE("too many adapters"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ info = vzalloc(sizeof(struct d3dkmt_adapterinfo) * args.num_adapters); -+ if (info == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ adapters = vzalloc(sizeof(struct dxgadapter *) * args.num_adapters); -+ if (adapters == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED); -+ dxgglobal_acquire_process_adapter_lock(); -+ -+ list_for_each_entry(entry, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (dxgadapter_acquire_lock_shared(entry) == 0) { -+ struct d3dkmt_adapterinfo *inf = 
&info[adapter_count]; -+ -+ ret = dxgprocess_open_adapter(process, entry, -+ &inf->adapter_handle); -+ if (ret >= 0) { -+ inf->adapter_luid = entry->luid; -+ adapters[adapter_count] = entry; -+ DXG_TRACE("adapter: %x %llx", -+ inf->adapter_handle.v, -+ *(u64 *) &inf->adapter_luid); -+ adapter_count++; -+ } -+ dxgadapter_release_lock_shared(entry); -+ } -+ if (ret < 0) -+ break; -+ } -+ -+ dxgglobal_release_process_adapter_lock(); -+ dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED); -+ -+ args.num_adapters = adapter_count; -+ -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy args to user"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_to_user(args.adapters, info, -+ sizeof(info[0]) * args.num_adapters); -+ if (ret) { -+ DXG_ERR("failed to copy adapter info to user"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (info) { -+ for (i = 0; i < args.num_adapters; i++) { -+ dxgprocess_close_adapter(process, -+ info[i].adapter_handle); -+ } -+ } -+ } else { -+ DXG_TRACE("found %d adapters", args.num_adapters); -+ } -+ -+ if (info) -+ vfree(info); -+ if (adapters) -+ vfree(adapters); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_enum_adapters3(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_enumadapters3 args; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgkp_enum_adapters(process, args.filter, -+ args.adapter_count, -+ args.adapters, -+ &((struct d3dkmt_enumadapters3 *)inargs)-> -+ adapter_count); -+ -+cleanup: -+ -+ DXG_TRACE("ioctl: %s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_close_adapter(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmthandle args; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed 
to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgprocess_close_adapter(process, args); -+ if (ret < 0) -+ DXG_ERR("failed to close adapter: %d", ret); -+ -+cleanup: -+ -+ DXG_TRACE("ioctl: %s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_query_adapter_info(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_queryadapterinfo args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.private_data_size > DXG_MAX_VM_BUS_PACKET_SIZE || -+ args.private_data_size == 0) { -+ DXG_ERR("invalid private data size"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ DXG_TRACE("Type: %d Size: %x", args.type, args.private_data_size); -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = dxgvmb_send_query_adapter_info(process, adapter, &args); -+ -+ dxgadapter_release_lock_shared(adapter); -+ -+cleanup: -+ -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static struct ioctl_desc ioctls[] = { -+/* 0x00 */ {}, -+/* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, -+/* 0x02 */ {}, -+/* 0x03 */ {}, -+/* 0x04 */ {}, -+/* 0x05 */ {}, -+/* 0x06 */ {}, -+/* 0x07 */ {}, -+/* 0x08 */ {}, -+/* 0x09 */ {dxgkio_query_adapter_info, LX_DXQUERYADAPTERINFO}, -+/* 0x0a */ {}, -+/* 0x0b */ {}, -+/* 0x0c */ {}, -+/* 0x0d */ {}, -+/* 0x0e */ {}, -+/* 0x0f */ {}, -+/* 0x10 */ {}, -+/* 0x11 */ {}, -+/* 0x12 */ {}, -+/* 0x13 */ {}, -+/* 0x14 */ {dxgkio_enum_adapters, LX_DXENUMADAPTERS2}, -+/* 0x15 */ {dxgkio_close_adapter, LX_DXCLOSEADAPTER}, -+/* 0x16 */ {}, -+/* 0x17 */ {}, -+/* 0x18 
*/ {}, -+/* 0x19 */ {}, -+/* 0x1a */ {}, -+/* 0x1b */ {}, -+/* 0x1c */ {}, -+/* 0x1d */ {}, -+/* 0x1e */ {}, -+/* 0x1f */ {}, -+/* 0x20 */ {}, -+/* 0x21 */ {}, -+/* 0x22 */ {}, -+/* 0x23 */ {}, -+/* 0x24 */ {}, -+/* 0x25 */ {}, -+/* 0x26 */ {}, -+/* 0x27 */ {}, -+/* 0x28 */ {}, -+/* 0x29 */ {}, -+/* 0x2a */ {}, -+/* 0x2b */ {}, -+/* 0x2c */ {}, -+/* 0x2d */ {}, -+/* 0x2e */ {}, -+/* 0x2f */ {}, -+/* 0x30 */ {}, -+/* 0x31 */ {}, -+/* 0x32 */ {}, -+/* 0x33 */ {}, -+/* 0x34 */ {}, -+/* 0x35 */ {}, -+/* 0x36 */ {}, -+/* 0x37 */ {}, -+/* 0x38 */ {}, -+/* 0x39 */ {}, -+/* 0x3a */ {}, -+/* 0x3b */ {}, -+/* 0x3c */ {}, -+/* 0x3d */ {}, -+/* 0x3e */ {dxgkio_enum_adapters3, LX_DXENUMADAPTERS3}, -+/* 0x3f */ {}, -+/* 0x40 */ {}, -+/* 0x41 */ {}, -+/* 0x42 */ {}, -+/* 0x43 */ {}, -+/* 0x44 */ {}, -+/* 0x45 */ {}, - }; - - /* -@@ -82,3 +546,19 @@ long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2) - DXG_TRACE("unlocked ioctl %x Code:%d", p1, _IOC_NR(p1)); - return dxgk_ioctl(f, p1, p2); - } -+ -+#ifdef DEBUG -+void dxgk_validate_ioctls(void) -+{ -+ int i; -+ -+ for (i=0; i < ARRAY_SIZE(ioctls); i++) -+ { -+ if (ioctls[i].ioctl && _IOC_NR(ioctls[i].ioctl) != i) -+ { -+ DXG_ERR("Invalid ioctl"); -+ DXGKRNL_ASSERT(0); -+ } -+ } -+} -+#endif --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1674-drivers-hv-dxgkrnl-Creation-of-dxgdevice-objects.patch b/patch/kernel/archive/wsl2-arm64-6.1/1674-drivers-hv-dxgkrnl-Creation-of-dxgdevice-objects.patch deleted file mode 100644 index 8bbd854777b4..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1674-drivers-hv-dxgkrnl-Creation-of-dxgdevice-objects.patch +++ /dev/null @@ -1,828 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 1 Feb 2022 17:23:58 -0800 -Subject: drivers: hv: dxgkrnl: Creation of dxgdevice objects - -Implement ioctls for creation and destruction of dxgdevice -objects: - - the LX_DXCREATEDEVICE ioctl - - the 
LX_DXDESTROYDEVICE ioctl - -A dxgdevice object represents a container of other virtual -compute device objects (allocations, sync objects, contexts, -etc.). It belongs to a dxgadapter object. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 187 ++++++++++ - drivers/hv/dxgkrnl/dxgkrnl.h | 58 +++ - drivers/hv/dxgkrnl/dxgprocess.c | 43 +++ - drivers/hv/dxgkrnl/dxgvmbus.c | 80 ++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 22 ++ - drivers/hv/dxgkrnl/ioctl.c | 130 ++++++- - drivers/hv/dxgkrnl/misc.h | 8 +- - include/uapi/misc/d3dkmthk.h | 82 ++++ - 8 files changed, 604 insertions(+), 6 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -194,6 +194,122 @@ void dxgadapter_release_lock_shared(struct dxgadapter *adapter) - up_read(&adapter->core_lock); - } - -+struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, -+ struct dxgprocess *process) -+{ -+ struct dxgdevice *device; -+ int ret; -+ -+ device = kzalloc(sizeof(struct dxgdevice), GFP_KERNEL); -+ if (device) { -+ kref_init(&device->device_kref); -+ device->adapter = adapter; -+ device->process = process; -+ kref_get(&adapter->adapter_kref); -+ init_rwsem(&device->device_lock); -+ INIT_LIST_HEAD(&device->pqueue_list_head); -+ device->object_state = DXGOBJECTSTATE_CREATED; -+ device->execution_state = _D3DKMT_DEVICEEXECUTION_ACTIVE; -+ -+ ret = dxgprocess_adapter_add_device(process, adapter, device); -+ if (ret < 0) { -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ } -+ } -+ return device; -+} -+ -+void dxgdevice_stop(struct dxgdevice *device) -+{ -+} -+ -+void dxgdevice_mark_destroyed(struct dxgdevice *device) -+{ -+ down_write(&device->device_lock); -+ device->object_state = DXGOBJECTSTATE_DESTROYED; -+ up_write(&device->device_lock); -+} -+ 
-+void dxgdevice_destroy(struct dxgdevice *device) -+{ -+ struct dxgprocess *process = device->process; -+ struct dxgadapter *adapter = device->adapter; -+ struct d3dkmthandle device_handle = {}; -+ -+ DXG_TRACE("Destroying device: %p", device); -+ -+ down_write(&device->device_lock); -+ -+ if (device->object_state != DXGOBJECTSTATE_ACTIVE) -+ goto cleanup; -+ -+ device->object_state = DXGOBJECTSTATE_DESTROYED; -+ -+ dxgdevice_stop(device); -+ -+ /* Guest handles need to be released before the host handles */ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ if (device->handle_valid) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGDEVICE, device->handle); -+ device_handle = device->handle; -+ device->handle_valid = 0; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (device_handle.v) { -+ up_write(&device->device_lock); -+ if (dxgadapter_acquire_lock_shared(adapter) == 0) { -+ dxgvmb_send_destroy_device(adapter, process, -+ device_handle); -+ dxgadapter_release_lock_shared(adapter); -+ } -+ down_write(&device->device_lock); -+ } -+ -+cleanup: -+ -+ if (device->adapter) { -+ dxgprocess_adapter_remove_device(device); -+ kref_put(&device->adapter->adapter_kref, dxgadapter_release); -+ device->adapter = NULL; -+ } -+ -+ up_write(&device->device_lock); -+ -+ kref_put(&device->device_kref, dxgdevice_release); -+ DXG_TRACE("Device destroyed"); -+} -+ -+int dxgdevice_acquire_lock_shared(struct dxgdevice *device) -+{ -+ down_read(&device->device_lock); -+ if (!dxgdevice_is_active(device)) { -+ up_read(&device->device_lock); -+ return -ENODEV; -+ } -+ return 0; -+} -+ -+void dxgdevice_release_lock_shared(struct dxgdevice *device) -+{ -+ up_read(&device->device_lock); -+} -+ -+bool dxgdevice_is_active(struct dxgdevice *device) -+{ -+ return device->object_state == DXGOBJECTSTATE_ACTIVE; -+} -+ -+void dxgdevice_release(struct kref *refcount) -+{ -+ struct dxgdevice *device; -+ -+ device = container_of(refcount, struct 
dxgdevice, device_kref); -+ kfree(device); -+} -+ - struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - struct dxgadapter *adapter) - { -@@ -208,6 +324,8 @@ struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - adapter_info->adapter = adapter; - adapter_info->process = process; - adapter_info->refcount = 1; -+ mutex_init(&adapter_info->device_list_mutex); -+ INIT_LIST_HEAD(&adapter_info->device_list_head); - list_add_tail(&adapter_info->process_adapter_list_entry, - &process->process_adapter_list_head); - dxgadapter_add_process(adapter, adapter_info); -@@ -221,10 +339,34 @@ struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - - void dxgprocess_adapter_stop(struct dxgprocess_adapter *adapter_info) - { -+ struct dxgdevice *device; -+ -+ mutex_lock(&adapter_info->device_list_mutex); -+ list_for_each_entry(device, &adapter_info->device_list_head, -+ device_list_entry) { -+ dxgdevice_stop(device); -+ } -+ mutex_unlock(&adapter_info->device_list_mutex); - } - - void dxgprocess_adapter_destroy(struct dxgprocess_adapter *adapter_info) - { -+ struct dxgdevice *device; -+ -+ mutex_lock(&adapter_info->device_list_mutex); -+ while (!list_empty(&adapter_info->device_list_head)) { -+ device = list_first_entry(&adapter_info->device_list_head, -+ struct dxgdevice, device_list_entry); -+ list_del(&device->device_list_entry); -+ device->device_list_entry.next = NULL; -+ mutex_unlock(&adapter_info->device_list_mutex); -+ dxgvmb_send_flush_device(device, -+ DXGDEVICE_FLUSHSCHEDULER_DEVICE_TERMINATE); -+ dxgdevice_destroy(device); -+ mutex_lock(&adapter_info->device_list_mutex); -+ } -+ mutex_unlock(&adapter_info->device_list_mutex); -+ - dxgadapter_remove_process(adapter_info); - kref_put(&adapter_info->adapter->adapter_kref, dxgadapter_release); - list_del(&adapter_info->process_adapter_list_entry); -@@ -240,3 +382,48 @@ void dxgprocess_adapter_release(struct dxgprocess_adapter *adapter_info) - 
if (adapter_info->refcount == 0) - dxgprocess_adapter_destroy(adapter_info); - } -+ -+int dxgprocess_adapter_add_device(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct dxgdevice *device) -+{ -+ struct dxgprocess_adapter *entry; -+ struct dxgprocess_adapter *adapter_info = NULL; -+ int ret = 0; -+ -+ dxgglobal_acquire_process_adapter_lock(); -+ -+ list_for_each_entry(entry, &process->process_adapter_list_head, -+ process_adapter_list_entry) { -+ if (entry->adapter == adapter) { -+ adapter_info = entry; -+ break; -+ } -+ } -+ if (adapter_info == NULL) { -+ DXG_ERR("failed to find process adapter info"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ mutex_lock(&adapter_info->device_list_mutex); -+ list_add_tail(&device->device_list_entry, -+ &adapter_info->device_list_head); -+ device->adapter_info = adapter_info; -+ mutex_unlock(&adapter_info->device_list_mutex); -+ -+cleanup: -+ -+ dxgglobal_release_process_adapter_lock(); -+ return ret; -+} -+ -+void dxgprocess_adapter_remove_device(struct dxgdevice *device) -+{ -+ DXG_TRACE("Removing device: %p", device); -+ mutex_lock(&device->adapter_info->device_list_mutex); -+ if (device->device_list_entry.next) { -+ list_del(&device->device_list_entry); -+ device->device_list_entry.next = NULL; -+ } -+ mutex_unlock(&device->adapter_info->device_list_mutex); -+} -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -34,6 +34,7 @@ - - struct dxgprocess; - struct dxgadapter; -+struct dxgdevice; - - /* - * Driver private data. 
-@@ -71,6 +72,10 @@ struct dxgk_device_types { - u32 virtual_monitor_device:1; - }; - -+enum dxgdevice_flushschedulerreason { -+ DXGDEVICE_FLUSHSCHEDULER_DEVICE_TERMINATE = 4, -+}; -+ - enum dxgobjectstate { - DXGOBJECTSTATE_CREATED, - DXGOBJECTSTATE_ACTIVE, -@@ -166,6 +171,9 @@ struct dxgprocess_adapter { - struct list_head adapter_process_list_entry; - /* Entry in dxgprocess::process_adapter_list_head */ - struct list_head process_adapter_list_entry; -+ /* List of all dxgdevice objects created for the process on adapter */ -+ struct list_head device_list_head; -+ struct mutex device_list_mutex; - struct dxgadapter *adapter; - struct dxgprocess *process; - int refcount; -@@ -175,6 +183,10 @@ struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - struct dxgadapter - *adapter); - void dxgprocess_adapter_release(struct dxgprocess_adapter *adapter); -+int dxgprocess_adapter_add_device(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct dxgdevice *device); -+void dxgprocess_adapter_remove_device(struct dxgdevice *device); - void dxgprocess_adapter_stop(struct dxgprocess_adapter *adapter_info); - void dxgprocess_adapter_destroy(struct dxgprocess_adapter *adapter_info); - -@@ -222,6 +234,11 @@ struct dxgadapter *dxgprocess_get_adapter(struct dxgprocess *process, - struct d3dkmthandle handle); - struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process, - struct d3dkmthandle handle); -+struct dxgdevice *dxgprocess_device_by_handle(struct dxgprocess *process, -+ struct d3dkmthandle handle); -+struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process, -+ enum hmgrentry_type t, -+ struct d3dkmthandle h); - void dxgprocess_ht_lock_shared_down(struct dxgprocess *process); - void dxgprocess_ht_lock_shared_up(struct dxgprocess *process); - void dxgprocess_ht_lock_exclusive_down(struct dxgprocess *process); -@@ -241,6 +258,7 @@ enum dxgadapter_state { - * This object represents the grapchis 
adapter. - * Objects, which take reference on the adapter: - * - dxgglobal -+ * - dxgdevice - * - adapter handle (struct d3dkmthandle) - */ - struct dxgadapter { -@@ -277,6 +295,38 @@ void dxgadapter_add_process(struct dxgadapter *adapter, - struct dxgprocess_adapter *process_info); - void dxgadapter_remove_process(struct dxgprocess_adapter *process_info); - -+/* -+ * The object represent the device object. -+ * The following objects take reference on the device -+ * - device handle (struct d3dkmthandle) -+ */ -+struct dxgdevice { -+ enum dxgobjectstate object_state; -+ /* Device takes reference on the adapter */ -+ struct dxgadapter *adapter; -+ struct dxgprocess_adapter *adapter_info; -+ struct dxgprocess *process; -+ /* Entry in the DGXPROCESS_ADAPTER device list */ -+ struct list_head device_list_entry; -+ struct kref device_kref; -+ /* Protects destcruction of the device object */ -+ struct rw_semaphore device_lock; -+ /* List of paging queues. Protected by process handle table lock. 
*/ -+ struct list_head pqueue_list_head; -+ struct d3dkmthandle handle; -+ enum d3dkmt_deviceexecution_state execution_state; -+ u32 handle_valid; -+}; -+ -+struct dxgdevice *dxgdevice_create(struct dxgadapter *a, struct dxgprocess *p); -+void dxgdevice_destroy(struct dxgdevice *device); -+void dxgdevice_stop(struct dxgdevice *device); -+void dxgdevice_mark_destroyed(struct dxgdevice *device); -+int dxgdevice_acquire_lock_shared(struct dxgdevice *dev); -+void dxgdevice_release_lock_shared(struct dxgdevice *dev); -+void dxgdevice_release(struct kref *refcount); -+bool dxgdevice_is_active(struct dxgdevice *dev); -+ - long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2); - long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2); - -@@ -313,6 +363,14 @@ int dxgvmb_send_destroy_process(struct d3dkmthandle process); - int dxgvmb_send_open_adapter(struct dxgadapter *adapter); - int dxgvmb_send_close_adapter(struct dxgadapter *adapter); - int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter); -+struct d3dkmthandle dxgvmb_send_create_device(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmt_createdevice *args); -+int dxgvmb_send_destroy_device(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmthandle h); -+int dxgvmb_send_flush_device(struct dxgdevice *device, -+ enum dxgdevice_flushschedulerreason reason); - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -241,6 +241,49 @@ struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process, - return adapter; - } - -+struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process, -+ enum hmgrentry_type t, -+ 
struct d3dkmthandle handle) -+{ -+ struct dxgdevice *device = NULL; -+ void *obj; -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -+ obj = hmgrtable_get_object_by_type(&process->handle_table, t, handle); -+ if (obj) { -+ struct d3dkmthandle device_handle = {}; -+ -+ switch (t) { -+ case HMGRENTRY_TYPE_DXGDEVICE: -+ device = obj; -+ break; -+ default: -+ DXG_ERR("invalid handle type: %d", t); -+ break; -+ } -+ if (device == NULL) -+ device = hmgrtable_get_object_by_type( -+ &process->handle_table, -+ HMGRENTRY_TYPE_DXGDEVICE, -+ device_handle); -+ if (device) -+ if (kref_get_unless_zero(&device->device_kref) == 0) -+ device = NULL; -+ } -+ if (device == NULL) -+ DXG_ERR("device_by_handle failed: %d %x", t, handle.v); -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED); -+ return device; -+} -+ -+struct dxgdevice *dxgprocess_device_by_handle(struct dxgprocess *process, -+ struct d3dkmthandle handle) -+{ -+ return dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGDEVICE, -+ handle); -+} -+ - void dxgprocess_ht_lock_shared_down(struct dxgprocess *process) - { - hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -673,6 +673,86 @@ int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter) - return ret; - } - -+struct d3dkmthandle dxgvmb_send_create_device(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmt_createdevice *args) -+{ -+ int ret; -+ struct dxgkvmb_command_createdevice *command; -+ struct dxgkvmb_command_createdevice_return result = { }; -+ struct dxgvmbusmsg msg; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_CREATEDEVICE, -+ process->host_handle); -+ 
command->flags = args->flags; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) -+ result.device.v = 0; -+ free_message(&msg, process); -+cleanup: -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return result.device; -+} -+ -+int dxgvmb_send_destroy_device(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmthandle h) -+{ -+ int ret; -+ struct dxgkvmb_command_destroydevice *command; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_DESTROYDEVICE, -+ process->host_handle); -+ command->device = h; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_flush_device(struct dxgdevice *device, -+ enum dxgdevice_flushschedulerreason reason) -+{ -+ int ret; -+ struct dxgkvmb_command_flushdevice *command; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ struct dxgprocess *process = device->process; -+ -+ ret = init_message(&msg, device->adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_FLUSHDEVICE, -+ process->host_handle); -+ command->device = device->handle; -+ command->reason = reason; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ 
b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -247,4 +247,26 @@ struct dxgkvmb_command_queryadapterinfo_return { - u8 private_data[1]; - }; - -+struct dxgkvmb_command_createdevice { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_createdeviceflags flags; -+ bool cdd_device; -+ void *error_code; -+}; -+ -+struct dxgkvmb_command_createdevice_return { -+ struct d3dkmthandle device; -+}; -+ -+struct dxgkvmb_command_destroydevice { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+}; -+ -+struct dxgkvmb_command_flushdevice { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ enum dxgdevice_flushschedulerreason reason; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -424,10 +424,136 @@ dxgkio_query_adapter_info(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_create_device(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_createdevice args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ struct d3dkmthandle host_device_handle = {}; -+ bool adapter_locked = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* The call acquires reference on the adapter */ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgdevice_create(adapter, process); -+ if (device == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) -+ goto cleanup; -+ -+ adapter_locked = true; -+ -+ host_device_handle = dxgvmb_send_create_device(adapter, process, &args); -+ if (host_device_handle.v) { -+ ret = 
copy_to_user(&((struct d3dkmt_createdevice *)inargs)-> -+ device, &host_device_handle, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy device handle"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, device, -+ HMGRENTRY_TYPE_DXGDEVICE, -+ host_device_handle); -+ if (ret >= 0) { -+ device->handle = host_device_handle; -+ device->handle_valid = 1; -+ device->object_state = DXGOBJECTSTATE_ACTIVE; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (host_device_handle.v) -+ dxgvmb_send_destroy_device(adapter, process, -+ host_device_handle); -+ if (device) -+ dxgdevice_destroy(device); -+ } -+ -+ if (adapter_locked) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_destroy_device(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_destroydevice args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ device = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGDEVICE, -+ args.device); -+ if (device) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGDEVICE, args.device); -+ device->handle_valid = 0; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (device == NULL) { -+ DXG_ERR("invalid device handle: %x", args.device.v); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ -+ dxgdevice_destroy(device); -+ -+ if (dxgadapter_acquire_lock_shared(adapter) == 0) { -+ 
dxgvmb_send_destroy_device(adapter, process, args.device); -+ dxgadapter_release_lock_shared(adapter); -+ } -+ -+cleanup: -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, --/* 0x02 */ {}, -+/* 0x02 */ {dxgkio_create_device, LX_DXCREATEDEVICE}, - /* 0x03 */ {}, - /* 0x04 */ {}, - /* 0x05 */ {}, -@@ -450,7 +576,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x16 */ {}, - /* 0x17 */ {}, - /* 0x18 */ {}, --/* 0x19 */ {}, -+/* 0x19 */ {dxgkio_destroy_device, LX_DXDESTROYDEVICE}, - /* 0x1a */ {}, - /* 0x1b */ {}, - /* 0x1c */ {}, -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -27,10 +27,10 @@ extern const struct d3dkmthandle zerohandle; - * - * channel_lock (VMBus channel lock) - * fd_mutex -- * plistmutex -- * table_lock -- * core_lock -- * device_lock -+ * plistmutex (process list mutex) -+ * table_lock (handle table lock) -+ * core_lock (dxgadapter lock) -+ * device_lock (dxgdevice lock) - * process_adapter_mutex - * adapter_list_lock - * device_mutex (dxgglobal mutex) -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -86,6 +86,74 @@ struct d3dkmt_openadapterfromluid { - struct d3dkmthandle adapter_handle; - }; - -+struct d3dddi_allocationlist { -+ struct d3dkmthandle allocation; -+ union { -+ struct { -+ __u32 write_operation :1; -+ __u32 do_not_retire_instance :1; -+ __u32 offer_priority :3; -+ __u32 reserved :27; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dddi_patchlocationlist { -+ __u32 allocation_index; -+ union { -+ struct { -+ __u32 slot_id:24; -+ __u32 reserved:8; -+ }; -+ __u32 value; -+ }; -+ __u32 driver_id; -+ __u32 
allocation_offset; -+ __u32 patch_offset; -+ __u32 split_offset; -+}; -+ -+struct d3dkmt_createdeviceflags { -+ __u32 legacy_mode:1; -+ __u32 request_vSync:1; -+ __u32 disable_gpu_timeout:1; -+ __u32 gdi_device:1; -+ __u32 reserved:28; -+}; -+ -+struct d3dkmt_createdevice { -+ struct d3dkmthandle adapter; -+ __u32 reserved3; -+ struct d3dkmt_createdeviceflags flags; -+ struct d3dkmthandle device; -+#ifdef __KERNEL__ -+ void *command_buffer; -+#else -+ __u64 command_buffer; -+#endif -+ __u32 command_buffer_size; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ struct d3dddi_allocationlist *allocation_list; -+#else -+ __u64 allocation_list; -+#endif -+ __u32 allocation_list_size; -+ __u32 reserved1; -+#ifdef __KERNEL__ -+ struct d3dddi_patchlocationlist *patch_location_list; -+#else -+ __u64 patch_location_list; -+#endif -+ __u32 patch_location_list_size; -+ __u32 reserved2; -+}; -+ -+struct d3dkmt_destroydevice { -+ struct d3dkmthandle device; -+}; -+ - struct d3dkmt_adaptertype { - union { - struct { -@@ -125,6 +193,16 @@ struct d3dkmt_queryadapterinfo { - __u32 private_data_size; - }; - -+enum d3dkmt_deviceexecution_state { -+ _D3DKMT_DEVICEEXECUTION_ACTIVE = 1, -+ _D3DKMT_DEVICEEXECUTION_RESET = 2, -+ _D3DKMT_DEVICEEXECUTION_HUNG = 3, -+ _D3DKMT_DEVICEEXECUTION_STOPPED = 4, -+ _D3DKMT_DEVICEEXECUTION_ERROR_OUTOFMEMORY = 5, -+ _D3DKMT_DEVICEEXECUTION_ERROR_DMAFAULT = 6, -+ _D3DKMT_DEVICEEXECUTION_ERROR_DMAPAGEFAULT = 7, -+}; -+ - union d3dkmt_enumadapters_filter { - struct { - __u64 include_compute_only:1; -@@ -152,12 +230,16 @@ struct d3dkmt_enumadapters3 { - - #define LX_DXOPENADAPTERFROMLUID \ - _IOWR(0x47, 0x01, struct d3dkmt_openadapterfromluid) -+#define LX_DXCREATEDEVICE \ -+ _IOWR(0x47, 0x02, struct d3dkmt_createdevice) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) - #define LX_DXENUMADAPTERS2 \ - _IOWR(0x47, 0x14, struct d3dkmt_enumadapters2) - #define LX_DXCLOSEADAPTER \ - _IOWR(0x47, 0x15, struct d3dkmt_closeadapter) 
-+#define LX_DXDESTROYDEVICE \ -+ _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXENUMADAPTERS3 \ - _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) - --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1675-drivers-hv-dxgkrnl-Creation-of-dxgcontext-objects.patch b/patch/kernel/archive/wsl2-arm64-6.1/1675-drivers-hv-dxgkrnl-Creation-of-dxgcontext-objects.patch deleted file mode 100644 index 7e8f50dcb826..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1675-drivers-hv-dxgkrnl-Creation-of-dxgcontext-objects.patch +++ /dev/null @@ -1,668 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 1 Feb 2022 17:03:47 -0800 -Subject: drivers: hv: dxgkrnl: Creation of dxgcontext objects - -Implement ioctls for creation/destruction of dxgcontext -objects: - - the LX_DXCREATECONTEXTVIRTUAL ioctl - - the LX_DXDESTROYCONTEXT ioctl. - -A dxgcontext object represents a compute device execution thread. -Ccompute device DMA buffers and synchronization operations are -submitted for execution to a dxgcontext. dxgcontexts objects -belong to a dxgdevice object. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 103 ++++++ - drivers/hv/dxgkrnl/dxgkrnl.h | 38 +++ - drivers/hv/dxgkrnl/dxgprocess.c | 4 + - drivers/hv/dxgkrnl/dxgvmbus.c | 101 +++++- - drivers/hv/dxgkrnl/dxgvmbus.h | 18 + - drivers/hv/dxgkrnl/ioctl.c | 168 +++++++++- - drivers/hv/dxgkrnl/misc.h | 1 + - include/uapi/misc/d3dkmthk.h | 47 +++ - 8 files changed, 477 insertions(+), 3 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -206,7 +206,9 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - device->adapter = adapter; - device->process = process; - kref_get(&adapter->adapter_kref); -+ INIT_LIST_HEAD(&device->context_list_head); - init_rwsem(&device->device_lock); -+ init_rwsem(&device->context_list_lock); - INIT_LIST_HEAD(&device->pqueue_list_head); - device->object_state = DXGOBJECTSTATE_CREATED; - device->execution_state = _D3DKMT_DEVICEEXECUTION_ACTIVE; -@@ -248,6 +250,20 @@ void dxgdevice_destroy(struct dxgdevice *device) - - dxgdevice_stop(device); - -+ { -+ struct dxgcontext *context; -+ struct dxgcontext *tmp; -+ -+ DXG_TRACE("destroying contexts"); -+ dxgdevice_acquire_context_list_lock(device); -+ list_for_each_entry_safe(context, tmp, -+ &device->context_list_head, -+ context_list_entry) { -+ dxgcontext_destroy(process, context); -+ } -+ dxgdevice_release_context_list_lock(device); -+ } -+ - /* Guest handles need to be released before the host handles */ - hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); - if (device->handle_valid) { -@@ -302,6 +318,32 @@ bool dxgdevice_is_active(struct dxgdevice *device) - return device->object_state == DXGOBJECTSTATE_ACTIVE; - } - -+void dxgdevice_acquire_context_list_lock(struct dxgdevice *device) -+{ -+ down_write(&device->context_list_lock); -+} 
-+ -+void dxgdevice_release_context_list_lock(struct dxgdevice *device) -+{ -+ up_write(&device->context_list_lock); -+} -+ -+void dxgdevice_add_context(struct dxgdevice *device, struct dxgcontext *context) -+{ -+ down_write(&device->context_list_lock); -+ list_add_tail(&context->context_list_entry, &device->context_list_head); -+ up_write(&device->context_list_lock); -+} -+ -+void dxgdevice_remove_context(struct dxgdevice *device, -+ struct dxgcontext *context) -+{ -+ if (context->context_list_entry.next) { -+ list_del(&context->context_list_entry); -+ context->context_list_entry.next = NULL; -+ } -+} -+ - void dxgdevice_release(struct kref *refcount) - { - struct dxgdevice *device; -@@ -310,6 +352,67 @@ void dxgdevice_release(struct kref *refcount) - kfree(device); - } - -+struct dxgcontext *dxgcontext_create(struct dxgdevice *device) -+{ -+ struct dxgcontext *context; -+ -+ context = kzalloc(sizeof(struct dxgcontext), GFP_KERNEL); -+ if (context) { -+ kref_init(&context->context_kref); -+ context->device = device; -+ context->process = device->process; -+ context->device_handle = device->handle; -+ kref_get(&device->device_kref); -+ INIT_LIST_HEAD(&context->hwqueue_list_head); -+ init_rwsem(&context->hwqueue_list_lock); -+ dxgdevice_add_context(device, context); -+ context->object_state = DXGOBJECTSTATE_ACTIVE; -+ } -+ return context; -+} -+ -+/* -+ * Called when the device context list lock is held -+ */ -+void dxgcontext_destroy(struct dxgprocess *process, struct dxgcontext *context) -+{ -+ DXG_TRACE("Destroying context %p", context); -+ context->object_state = DXGOBJECTSTATE_DESTROYED; -+ if (context->device) { -+ if (context->handle.v) { -+ hmgrtable_free_handle_safe(&process->handle_table, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ context->handle); -+ } -+ dxgdevice_remove_context(context->device, context); -+ kref_put(&context->device->device_kref, dxgdevice_release); -+ } -+ kref_put(&context->context_kref, dxgcontext_release); -+} -+ -+void 
dxgcontext_destroy_safe(struct dxgprocess *process, -+ struct dxgcontext *context) -+{ -+ struct dxgdevice *device = context->device; -+ -+ dxgdevice_acquire_context_list_lock(device); -+ dxgcontext_destroy(process, context); -+ dxgdevice_release_context_list_lock(device); -+} -+ -+bool dxgcontext_is_active(struct dxgcontext *context) -+{ -+ return context->object_state == DXGOBJECTSTATE_ACTIVE; -+} -+ -+void dxgcontext_release(struct kref *refcount) -+{ -+ struct dxgcontext *context; -+ -+ context = container_of(refcount, struct dxgcontext, context_kref); -+ kfree(context); -+} -+ - struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - struct dxgadapter *adapter) - { -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -35,6 +35,7 @@ - struct dxgprocess; - struct dxgadapter; - struct dxgdevice; -+struct dxgcontext; - - /* - * Driver private data. -@@ -298,6 +299,7 @@ void dxgadapter_remove_process(struct dxgprocess_adapter *process_info); - /* - * The object represent the device object. - * The following objects take reference on the device -+ * - dxgcontext - * - device handle (struct d3dkmthandle) - */ - struct dxgdevice { -@@ -311,6 +313,8 @@ struct dxgdevice { - struct kref device_kref; - /* Protects destcruction of the device object */ - struct rw_semaphore device_lock; -+ struct rw_semaphore context_list_lock; -+ struct list_head context_list_head; - /* List of paging queues. Protected by process handle table lock. 
*/ - struct list_head pqueue_list_head; - struct d3dkmthandle handle; -@@ -325,7 +329,33 @@ void dxgdevice_mark_destroyed(struct dxgdevice *device); - int dxgdevice_acquire_lock_shared(struct dxgdevice *dev); - void dxgdevice_release_lock_shared(struct dxgdevice *dev); - void dxgdevice_release(struct kref *refcount); -+void dxgdevice_add_context(struct dxgdevice *dev, struct dxgcontext *ctx); -+void dxgdevice_remove_context(struct dxgdevice *dev, struct dxgcontext *ctx); - bool dxgdevice_is_active(struct dxgdevice *dev); -+void dxgdevice_acquire_context_list_lock(struct dxgdevice *dev); -+void dxgdevice_release_context_list_lock(struct dxgdevice *dev); -+ -+/* -+ * The object represent the execution context of a device. -+ */ -+struct dxgcontext { -+ enum dxgobjectstate object_state; -+ struct dxgdevice *device; -+ struct dxgprocess *process; -+ /* entry in the device context list */ -+ struct list_head context_list_entry; -+ struct list_head hwqueue_list_head; -+ struct rw_semaphore hwqueue_list_lock; -+ struct kref context_kref; -+ struct d3dkmthandle handle; -+ struct d3dkmthandle device_handle; -+}; -+ -+struct dxgcontext *dxgcontext_create(struct dxgdevice *dev); -+void dxgcontext_destroy(struct dxgprocess *pr, struct dxgcontext *ctx); -+void dxgcontext_destroy_safe(struct dxgprocess *pr, struct dxgcontext *ctx); -+void dxgcontext_release(struct kref *refcount); -+bool dxgcontext_is_active(struct dxgcontext *ctx); - - long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2); - long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2); -@@ -371,6 +401,14 @@ int dxgvmb_send_destroy_device(struct dxgadapter *adapter, - struct d3dkmthandle h); - int dxgvmb_send_flush_device(struct dxgdevice *device, - enum dxgdevice_flushschedulerreason reason); -+struct d3dkmthandle -+dxgvmb_send_create_context(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmt_createcontextvirtual -+ *args); -+int 
dxgvmb_send_destroy_context(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmthandle h); - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -257,6 +257,10 @@ struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process, - case HMGRENTRY_TYPE_DXGDEVICE: - device = obj; - break; -+ case HMGRENTRY_TYPE_DXGCONTEXT: -+ device_handle = -+ ((struct dxgcontext *)obj)->device_handle; -+ break; - default: - DXG_ERR("invalid handle type: %d", t); - break; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -731,7 +731,7 @@ int dxgvmb_send_flush_device(struct dxgdevice *device, - enum dxgdevice_flushschedulerreason reason) - { - int ret; -- struct dxgkvmb_command_flushdevice *command; -+ struct dxgkvmb_command_flushdevice *command = NULL; - struct dxgvmbusmsg msg = {.hdr = NULL}; - struct dxgprocess *process = device->process; - -@@ -745,6 +745,105 @@ int dxgvmb_send_flush_device(struct dxgdevice *device, - command->device = device->handle; - command->reason = reason; - -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+struct d3dkmthandle -+dxgvmb_send_create_context(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmt_createcontextvirtual *args) -+{ -+ struct dxgkvmb_command_createcontextvirtual *command = NULL; -+ u32 cmd_size; -+ int ret; -+ struct d3dkmthandle context = {}; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ if (args->priv_drv_data_size > 
DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("PrivateDriverDataSize is invalid"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ cmd_size = sizeof(struct dxgkvmb_command_createcontextvirtual) + -+ args->priv_drv_data_size - 1; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CREATECONTEXTVIRTUAL, -+ process->host_handle); -+ command->device = args->device; -+ command->node_ordinal = args->node_ordinal; -+ command->engine_affinity = args->engine_affinity; -+ command->flags = args->flags; -+ command->client_hint = args->client_hint; -+ command->priv_drv_data_size = args->priv_drv_data_size; -+ if (args->priv_drv_data_size) { -+ ret = copy_from_user(command->priv_drv_data, -+ args->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("Faled to copy private data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ /* Input command is returned back as output */ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ command, cmd_size); -+ if (ret < 0) { -+ goto cleanup; -+ } else { -+ context = command->context; -+ if (args->priv_drv_data_size) { -+ ret = copy_to_user(args->priv_drv_data, -+ command->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ dev_err(DXGDEV, -+ "Faled to copy private data to user"); -+ ret = -EINVAL; -+ dxgvmb_send_destroy_context(adapter, process, -+ context); -+ context.v = 0; -+ } -+ } -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return context; -+} -+ -+int dxgvmb_send_destroy_context(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmthandle h) -+{ -+ int ret; -+ struct dxgkvmb_command_destroycontext *command; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ 
command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_DESTROYCONTEXT, -+ process->host_handle); -+ command->context = h; -+ - ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); - cleanup: - free_message(&msg, process); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -269,4 +269,22 @@ struct dxgkvmb_command_flushdevice { - enum dxgdevice_flushschedulerreason reason; - }; - -+struct dxgkvmb_command_createcontextvirtual { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle context; -+ struct d3dkmthandle device; -+ u32 node_ordinal; -+ u32 engine_affinity; -+ struct d3dddi_createcontextflags flags; -+ enum d3dkmt_clienthint client_hint; -+ u32 priv_drv_data_size; -+ u8 priv_drv_data[1]; -+}; -+ -+/* The command returns ntstatus */ -+struct dxgkvmb_command_destroycontext { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle context; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -550,13 +550,177 @@ dxgkio_destroy_device(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_create_context_virtual(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_createcontextvirtual args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgcontext *context = NULL; -+ struct d3dkmthandle host_context_handle = {}; -+ bool device_lock_acquired = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* -+ * The call acquires reference on the device. 
It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) -+ goto cleanup; -+ -+ device_lock_acquired = true; -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ context = dxgcontext_create(device); -+ if (context == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ host_context_handle = dxgvmb_send_create_context(adapter, -+ process, &args); -+ if (host_context_handle.v) { -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, context, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ host_context_handle); -+ if (ret >= 0) -+ context->handle = host_context_handle; -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ if (ret < 0) -+ goto cleanup; -+ ret = copy_to_user(&((struct d3dkmt_createcontextvirtual *) -+ inargs)->context, &host_context_handle, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy context handle"); -+ ret = -EINVAL; -+ } -+ } else { -+ DXG_ERR("invalid host handle"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (host_context_handle.v) { -+ dxgvmb_send_destroy_context(adapter, process, -+ host_context_handle); -+ } -+ if (context) -+ dxgcontext_destroy_safe(process, context); -+ } -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) { -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_destroy_context(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_destroycontext args; -+ int ret; -+ struct dxgadapter *adapter = NULL; 
-+ struct dxgcontext *context = NULL; -+ struct dxgdevice *device = NULL; -+ struct d3dkmthandle device_handle = {}; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ context = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (context) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGCONTEXT, args.context); -+ context->handle.v = 0; -+ device_handle = context->device_handle; -+ context->object_state = DXGOBJECTSTATE_DESTROYED; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (context == NULL) { -+ DXG_ERR("invalid context handle: %x", args.context.v); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. 
-+ */ -+ device = dxgprocess_device_by_handle(process, device_handle); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_destroy_context(adapter, process, args.context); -+ -+ dxgcontext_destroy_safe(process, context); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %s %d", errorstr(ret), __func__, ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, - /* 0x02 */ {dxgkio_create_device, LX_DXCREATEDEVICE}, - /* 0x03 */ {}, --/* 0x04 */ {}, --/* 0x05 */ {}, -+/* 0x04 */ {dxgkio_create_context_virtual, LX_DXCREATECONTEXTVIRTUAL}, -+/* 0x05 */ {dxgkio_destroy_context, LX_DXDESTROYCONTEXT}, - /* 0x06 */ {}, - /* 0x07 */ {}, - /* 0x08 */ {}, -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -29,6 +29,7 @@ extern const struct d3dkmthandle zerohandle; - * fd_mutex - * plistmutex (process list mutex) - * table_lock (handle table lock) -+ * context_list_lock - * core_lock (dxgadapter lock) - * device_lock (dxgdevice lock) - * process_adapter_mutex -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -154,6 +154,49 @@ struct d3dkmt_destroydevice { - struct d3dkmthandle device; - }; - -+enum d3dkmt_clienthint { -+ _D3DKMT_CLIENTHNT_UNKNOWN = 0, -+ _D3DKMT_CLIENTHINT_OPENGL = 1, -+ _D3DKMT_CLIENTHINT_CDD = 2, -+ _D3DKMT_CLIENTHINT_DX7 = 7, -+ _D3DKMT_CLIENTHINT_DX8 = 8, -+ _D3DKMT_CLIENTHINT_DX9 = 9, -+ 
_D3DKMT_CLIENTHINT_DX10 = 10, -+}; -+ -+struct d3dddi_createcontextflags { -+ union { -+ struct { -+ __u32 null_rendering:1; -+ __u32 initial_data:1; -+ __u32 disable_gpu_timeout:1; -+ __u32 synchronization_only:1; -+ __u32 hw_queue_supported:1; -+ __u32 reserved:27; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_destroycontext { -+ struct d3dkmthandle context; -+}; -+ -+struct d3dkmt_createcontextvirtual { -+ struct d3dkmthandle device; -+ __u32 node_ordinal; -+ __u32 engine_affinity; -+ struct d3dddi_createcontextflags flags; -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ __u32 priv_drv_data_size; -+ enum d3dkmt_clienthint client_hint; -+ struct d3dkmthandle context; -+}; -+ - struct d3dkmt_adaptertype { - union { - struct { -@@ -232,6 +275,10 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x01, struct d3dkmt_openadapterfromluid) - #define LX_DXCREATEDEVICE \ - _IOWR(0x47, 0x02, struct d3dkmt_createdevice) -+#define LX_DXCREATECONTEXTVIRTUAL \ -+ _IOWR(0x47, 0x04, struct d3dkmt_createcontextvirtual) -+#define LX_DXDESTROYCONTEXT \ -+ _IOWR(0x47, 0x05, struct d3dkmt_destroycontext) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) - #define LX_DXENUMADAPTERS2 \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1676-drivers-hv-dxgkrnl-Creation-of-compute-device-allocations-and-resources.patch b/patch/kernel/archive/wsl2-arm64-6.1/1676-drivers-hv-dxgkrnl-Creation-of-compute-device-allocations-and-resources.patch deleted file mode 100644 index 9e0f2e4175a6..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1676-drivers-hv-dxgkrnl-Creation-of-compute-device-allocations-and-resources.patch +++ /dev/null @@ -1,2263 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 1 Feb 2022 15:37:52 -0800 -Subject: drivers: hv: dxgkrnl: Creation of compute device allocations and - resources - -Implemented ioctls to create 
and destroy virtual compute device -allocations (dxgallocation) and resources (dxgresource): - - the LX_DXCREATEALLOCATION ioctl, - - the LX_DXDESTROYALLOCATION2 ioctl. - -Compute device allocations (dxgallocation objects) represent memory -allocation, which could be accessible by the device. Allocations can -be created around existing system memory (provided by an application) -or memory, allocated by dxgkrnl on the host. - -Compute device resources (dxgresource objects) represent containers of -compute device allocations. Allocations could be dynamically added, -removed from a resource. - -Each allocation/resource has associated driver private data, which -is provided during creation. - -Each created resource or allocation have a handle (d3dkmthandle), -which is used to reference the corresponding object in other ioctls. - -A dxgallocation can be resident (meaning that it is accessible by -the compute device) or evicted. When an allocation is evicted, -its content is stored in the backing store in system memory. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 282 ++++ - drivers/hv/dxgkrnl/dxgkrnl.h | 113 ++ - drivers/hv/dxgkrnl/dxgmodule.c | 1 + - drivers/hv/dxgkrnl/dxgvmbus.c | 649 ++++++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 123 ++ - drivers/hv/dxgkrnl/ioctl.c | 631 ++++++++- - drivers/hv/dxgkrnl/misc.h | 3 + - include/uapi/misc/d3dkmthk.h | 204 +++ - 8 files changed, 2004 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -207,8 +207,11 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - device->process = process; - kref_get(&adapter->adapter_kref); - INIT_LIST_HEAD(&device->context_list_head); -+ INIT_LIST_HEAD(&device->alloc_list_head); -+ INIT_LIST_HEAD(&device->resource_list_head); - init_rwsem(&device->device_lock); - init_rwsem(&device->context_list_lock); -+ init_rwsem(&device->alloc_list_lock); - INIT_LIST_HEAD(&device->pqueue_list_head); - device->object_state = DXGOBJECTSTATE_CREATED; - device->execution_state = _D3DKMT_DEVICEEXECUTION_ACTIVE; -@@ -224,6 +227,14 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - - void dxgdevice_stop(struct dxgdevice *device) - { -+ struct dxgallocation *alloc; -+ -+ DXG_TRACE("Destroying device: %p", device); -+ dxgdevice_acquire_alloc_list_lock(device); -+ list_for_each_entry(alloc, &device->alloc_list_head, alloc_list_entry) { -+ dxgallocation_stop(alloc); -+ } -+ dxgdevice_release_alloc_list_lock(device); - } - - void dxgdevice_mark_destroyed(struct dxgdevice *device) -@@ -250,6 +261,33 @@ void dxgdevice_destroy(struct dxgdevice *device) - - dxgdevice_stop(device); - -+ dxgdevice_acquire_alloc_list_lock(device); -+ -+ { -+ struct dxgallocation *alloc; -+ struct dxgallocation *tmp; -+ -+ DXG_TRACE("destroying allocations"); -+ 
list_for_each_entry_safe(alloc, tmp, &device->alloc_list_head, -+ alloc_list_entry) { -+ dxgallocation_destroy(alloc); -+ } -+ } -+ -+ { -+ struct dxgresource *resource; -+ struct dxgresource *tmp; -+ -+ DXG_TRACE("destroying resources"); -+ list_for_each_entry_safe(resource, tmp, -+ &device->resource_list_head, -+ resource_list_entry) { -+ dxgresource_destroy(resource); -+ } -+ } -+ -+ dxgdevice_release_alloc_list_lock(device); -+ - { - struct dxgcontext *context; - struct dxgcontext *tmp; -@@ -328,6 +366,26 @@ void dxgdevice_release_context_list_lock(struct dxgdevice *device) - up_write(&device->context_list_lock); - } - -+void dxgdevice_acquire_alloc_list_lock(struct dxgdevice *device) -+{ -+ down_write(&device->alloc_list_lock); -+} -+ -+void dxgdevice_release_alloc_list_lock(struct dxgdevice *device) -+{ -+ up_write(&device->alloc_list_lock); -+} -+ -+void dxgdevice_acquire_alloc_list_lock_shared(struct dxgdevice *device) -+{ -+ down_read(&device->alloc_list_lock); -+} -+ -+void dxgdevice_release_alloc_list_lock_shared(struct dxgdevice *device) -+{ -+ up_read(&device->alloc_list_lock); -+} -+ - void dxgdevice_add_context(struct dxgdevice *device, struct dxgcontext *context) - { - down_write(&device->context_list_lock); -@@ -344,6 +402,161 @@ void dxgdevice_remove_context(struct dxgdevice *device, - } - } - -+void dxgdevice_add_alloc(struct dxgdevice *device, struct dxgallocation *alloc) -+{ -+ dxgdevice_acquire_alloc_list_lock(device); -+ list_add_tail(&alloc->alloc_list_entry, &device->alloc_list_head); -+ kref_get(&device->device_kref); -+ alloc->owner.device = device; -+ dxgdevice_release_alloc_list_lock(device); -+} -+ -+void dxgdevice_remove_alloc(struct dxgdevice *device, -+ struct dxgallocation *alloc) -+{ -+ if (alloc->alloc_list_entry.next) { -+ list_del(&alloc->alloc_list_entry); -+ alloc->alloc_list_entry.next = NULL; -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+} -+ -+void dxgdevice_remove_alloc_safe(struct dxgdevice *device, -+ 
struct dxgallocation *alloc) -+{ -+ dxgdevice_acquire_alloc_list_lock(device); -+ dxgdevice_remove_alloc(device, alloc); -+ dxgdevice_release_alloc_list_lock(device); -+} -+ -+void dxgdevice_add_resource(struct dxgdevice *device, struct dxgresource *res) -+{ -+ dxgdevice_acquire_alloc_list_lock(device); -+ list_add_tail(&res->resource_list_entry, &device->resource_list_head); -+ kref_get(&device->device_kref); -+ dxgdevice_release_alloc_list_lock(device); -+} -+ -+void dxgdevice_remove_resource(struct dxgdevice *device, -+ struct dxgresource *res) -+{ -+ if (res->resource_list_entry.next) { -+ list_del(&res->resource_list_entry); -+ res->resource_list_entry.next = NULL; -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+} -+ -+struct dxgresource *dxgresource_create(struct dxgdevice *device) -+{ -+ struct dxgresource *resource; -+ -+ resource = kzalloc(sizeof(struct dxgresource), GFP_KERNEL); -+ if (resource) { -+ kref_init(&resource->resource_kref); -+ resource->device = device; -+ resource->process = device->process; -+ resource->object_state = DXGOBJECTSTATE_ACTIVE; -+ mutex_init(&resource->resource_mutex); -+ INIT_LIST_HEAD(&resource->alloc_list_head); -+ dxgdevice_add_resource(device, resource); -+ } -+ return resource; -+} -+ -+void dxgresource_free_handle(struct dxgresource *resource) -+{ -+ struct dxgallocation *alloc; -+ struct dxgprocess *process; -+ -+ if (resource->handle_valid) { -+ process = resource->device->process; -+ hmgrtable_free_handle_safe(&process->handle_table, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ resource->handle); -+ resource->handle_valid = 0; -+ } -+ list_for_each_entry(alloc, &resource->alloc_list_head, -+ alloc_list_entry) { -+ dxgallocation_free_handle(alloc); -+ } -+} -+ -+void dxgresource_destroy(struct dxgresource *resource) -+{ -+ /* device->alloc_list_lock is held */ -+ struct dxgallocation *alloc; -+ struct dxgallocation *tmp; -+ struct d3dkmt_destroyallocation2 args = { }; -+ int destroyed = test_and_set_bit(0, 
&resource->flags); -+ struct dxgdevice *device = resource->device; -+ -+ if (!destroyed) { -+ dxgresource_free_handle(resource); -+ if (resource->handle.v) { -+ args.device = device->handle; -+ args.resource = resource->handle; -+ dxgvmb_send_destroy_allocation(device->process, -+ device, &args, NULL); -+ resource->handle.v = 0; -+ } -+ list_for_each_entry_safe(alloc, tmp, &resource->alloc_list_head, -+ alloc_list_entry) { -+ dxgallocation_destroy(alloc); -+ } -+ dxgdevice_remove_resource(device, resource); -+ } -+ kref_put(&resource->resource_kref, dxgresource_release); -+} -+ -+void dxgresource_release(struct kref *refcount) -+{ -+ struct dxgresource *resource; -+ -+ resource = container_of(refcount, struct dxgresource, resource_kref); -+ kfree(resource); -+} -+ -+bool dxgresource_is_active(struct dxgresource *resource) -+{ -+ return resource->object_state == DXGOBJECTSTATE_ACTIVE; -+} -+ -+int dxgresource_add_alloc(struct dxgresource *resource, -+ struct dxgallocation *alloc) -+{ -+ int ret = -ENODEV; -+ struct dxgdevice *device = resource->device; -+ -+ dxgdevice_acquire_alloc_list_lock(device); -+ if (dxgresource_is_active(resource)) { -+ list_add_tail(&alloc->alloc_list_entry, -+ &resource->alloc_list_head); -+ alloc->owner.resource = resource; -+ ret = 0; -+ } -+ alloc->resource_owner = 1; -+ dxgdevice_release_alloc_list_lock(device); -+ return ret; -+} -+ -+void dxgresource_remove_alloc(struct dxgresource *resource, -+ struct dxgallocation *alloc) -+{ -+ if (alloc->alloc_list_entry.next) { -+ list_del(&alloc->alloc_list_entry); -+ alloc->alloc_list_entry.next = NULL; -+ } -+} -+ -+void dxgresource_remove_alloc_safe(struct dxgresource *resource, -+ struct dxgallocation *alloc) -+{ -+ dxgdevice_acquire_alloc_list_lock(resource->device); -+ dxgresource_remove_alloc(resource, alloc); -+ dxgdevice_release_alloc_list_lock(resource->device); -+} -+ - void dxgdevice_release(struct kref *refcount) - { - struct dxgdevice *device; -@@ -413,6 +626,75 @@ void 
dxgcontext_release(struct kref *refcount) - kfree(context); - } - -+struct dxgallocation *dxgallocation_create(struct dxgprocess *process) -+{ -+ struct dxgallocation *alloc; -+ -+ alloc = kzalloc(sizeof(struct dxgallocation), GFP_KERNEL); -+ if (alloc) -+ alloc->process = process; -+ return alloc; -+} -+ -+void dxgallocation_stop(struct dxgallocation *alloc) -+{ -+ if (alloc->pages) { -+ release_pages(alloc->pages, alloc->num_pages); -+ vfree(alloc->pages); -+ alloc->pages = NULL; -+ } -+} -+ -+void dxgallocation_free_handle(struct dxgallocation *alloc) -+{ -+ dxgprocess_ht_lock_exclusive_down(alloc->process); -+ if (alloc->handle_valid) { -+ hmgrtable_free_handle(&alloc->process->handle_table, -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ alloc->alloc_handle); -+ alloc->handle_valid = 0; -+ } -+ dxgprocess_ht_lock_exclusive_up(alloc->process); -+} -+ -+void dxgallocation_destroy(struct dxgallocation *alloc) -+{ -+ struct dxgprocess *process = alloc->process; -+ struct d3dkmt_destroyallocation2 args = { }; -+ -+ dxgallocation_stop(alloc); -+ if (alloc->resource_owner) -+ dxgresource_remove_alloc(alloc->owner.resource, alloc); -+ else if (alloc->owner.device) -+ dxgdevice_remove_alloc(alloc->owner.device, alloc); -+ dxgallocation_free_handle(alloc); -+ if (alloc->alloc_handle.v && !alloc->resource_owner) { -+ args.device = alloc->owner.device->handle; -+ args.alloc_count = 1; -+ dxgvmb_send_destroy_allocation(process, -+ alloc->owner.device, -+ &args, &alloc->alloc_handle); -+ } -+#ifdef _MAIN_KERNEL_ -+ if (alloc->gpadl.gpadl_handle) { -+ DXG_TRACE("Teardown gpadl %d", -+ alloc->gpadl.gpadl_handle); -+ vmbus_teardown_gpadl(dxgglobal_get_vmbus(), &alloc->gpadl); -+ alloc->gpadl.gpadl_handle = 0; -+ } -+else -+ if (alloc->gpadl) { -+ DXG_TRACE("Teardown gpadl %d", -+ alloc->gpadl); -+ vmbus_teardown_gpadl(dxgglobal_get_vmbus(), alloc->gpadl); -+ alloc->gpadl = 0; -+ } -+#endif -+ if (alloc->priv_drv_data) -+ vfree(alloc->priv_drv_data); -+ kfree(alloc); -+} -+ - struct 
dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - struct dxgadapter *adapter) - { -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -36,6 +36,8 @@ struct dxgprocess; - struct dxgadapter; - struct dxgdevice; - struct dxgcontext; -+struct dxgallocation; -+struct dxgresource; - - /* - * Driver private data. -@@ -269,6 +271,8 @@ struct dxgadapter { - struct list_head adapter_list_entry; - /* The list of dxgprocess_adapter entries */ - struct list_head adapter_process_list_head; -+ /* This lock protects shared resource and syncobject lists */ -+ struct rw_semaphore shared_resource_list_lock; - struct pci_dev *pci_dev; - struct hv_device *hv_dev; - struct dxgvmbuschannel channel; -@@ -315,6 +319,10 @@ struct dxgdevice { - struct rw_semaphore device_lock; - struct rw_semaphore context_list_lock; - struct list_head context_list_head; -+ /* List of device allocations */ -+ struct rw_semaphore alloc_list_lock; -+ struct list_head alloc_list_head; -+ struct list_head resource_list_head; - /* List of paging queues. Protected by process handle table lock. 
*/ - struct list_head pqueue_list_head; - struct d3dkmthandle handle; -@@ -331,9 +339,19 @@ void dxgdevice_release_lock_shared(struct dxgdevice *dev); - void dxgdevice_release(struct kref *refcount); - void dxgdevice_add_context(struct dxgdevice *dev, struct dxgcontext *ctx); - void dxgdevice_remove_context(struct dxgdevice *dev, struct dxgcontext *ctx); -+void dxgdevice_add_alloc(struct dxgdevice *dev, struct dxgallocation *a); -+void dxgdevice_remove_alloc(struct dxgdevice *dev, struct dxgallocation *a); -+void dxgdevice_remove_alloc_safe(struct dxgdevice *dev, -+ struct dxgallocation *a); -+void dxgdevice_add_resource(struct dxgdevice *dev, struct dxgresource *res); -+void dxgdevice_remove_resource(struct dxgdevice *dev, struct dxgresource *res); - bool dxgdevice_is_active(struct dxgdevice *dev); - void dxgdevice_acquire_context_list_lock(struct dxgdevice *dev); - void dxgdevice_release_context_list_lock(struct dxgdevice *dev); -+void dxgdevice_acquire_alloc_list_lock(struct dxgdevice *dev); -+void dxgdevice_release_alloc_list_lock(struct dxgdevice *dev); -+void dxgdevice_acquire_alloc_list_lock_shared(struct dxgdevice *dev); -+void dxgdevice_release_alloc_list_lock_shared(struct dxgdevice *dev); - - /* - * The object represent the execution context of a device. 
-@@ -357,6 +375,83 @@ void dxgcontext_destroy_safe(struct dxgprocess *pr, struct dxgcontext *ctx); - void dxgcontext_release(struct kref *refcount); - bool dxgcontext_is_active(struct dxgcontext *ctx); - -+struct dxgresource { -+ struct kref resource_kref; -+ enum dxgobjectstate object_state; -+ struct d3dkmthandle handle; -+ struct list_head alloc_list_head; -+ struct list_head resource_list_entry; -+ struct list_head shared_resource_list_entry; -+ struct dxgdevice *device; -+ struct dxgprocess *process; -+ /* Protects adding allocations to resource and resource destruction */ -+ struct mutex resource_mutex; -+ u64 private_runtime_handle; -+ union { -+ struct { -+ u32 destroyed:1; /* Must be the first */ -+ u32 handle_valid:1; -+ u32 reserved:30; -+ }; -+ long flags; -+ }; -+}; -+ -+struct dxgresource *dxgresource_create(struct dxgdevice *dev); -+void dxgresource_destroy(struct dxgresource *res); -+void dxgresource_free_handle(struct dxgresource *res); -+void dxgresource_release(struct kref *refcount); -+int dxgresource_add_alloc(struct dxgresource *res, -+ struct dxgallocation *a); -+void dxgresource_remove_alloc(struct dxgresource *res, struct dxgallocation *a); -+void dxgresource_remove_alloc_safe(struct dxgresource *res, -+ struct dxgallocation *a); -+bool dxgresource_is_active(struct dxgresource *res); -+ -+struct privdata { -+ u32 data_size; -+ u8 data[1]; -+}; -+ -+struct dxgallocation { -+ /* Entry in the device list or resource list (when resource exists) */ -+ struct list_head alloc_list_entry; -+ /* Allocation owner */ -+ union { -+ struct dxgdevice *device; -+ struct dxgresource *resource; -+ } owner; -+ struct dxgprocess *process; -+ /* Pointer to private driver data desc. Used for shared resources */ -+ struct privdata *priv_drv_data; -+ struct d3dkmthandle alloc_handle; -+ /* Set to 1 when allocation belongs to resource. 
*/ -+ u32 resource_owner:1; -+ /* Set to 1 when the allocatio is mapped as cached */ -+ u32 cached:1; -+ u32 handle_valid:1; -+ /* GPADL address list for existing sysmem allocations */ -+#ifdef _MAIN_KERNEL_ -+ struct vmbus_gpadl gpadl; -+#else -+ u32 gpadl; -+#endif -+ /* Number of pages in the 'pages' array */ -+ u32 num_pages; -+ /* -+ * CPU address from the existing sysmem allocation, or -+ * mapped to the CPU visible backing store in the IO space -+ */ -+ void *cpu_address; -+ /* Describes pages for the existing sysmem allocation */ -+ struct page **pages; -+}; -+ -+struct dxgallocation *dxgallocation_create(struct dxgprocess *process); -+void dxgallocation_stop(struct dxgallocation *a); -+void dxgallocation_destroy(struct dxgallocation *a); -+void dxgallocation_free_handle(struct dxgallocation *a); -+ - long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2); - long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2); - -@@ -409,9 +504,27 @@ dxgvmb_send_create_context(struct dxgadapter *adapter, - int dxgvmb_send_destroy_context(struct dxgadapter *adapter, - struct dxgprocess *process, - struct d3dkmthandle h); -+int dxgvmb_send_create_allocation(struct dxgprocess *pr, struct dxgdevice *dev, -+ struct d3dkmt_createallocation *args, -+ struct d3dkmt_createallocation *__user inargs, -+ struct dxgresource *res, -+ struct dxgallocation **allocs, -+ struct d3dddi_allocationinfo2 *alloc_info, -+ struct d3dkmt_createstandardallocation *stda); -+int dxgvmb_send_destroy_allocation(struct dxgprocess *pr, struct dxgdevice *dev, -+ struct d3dkmt_destroyallocation2 *args, -+ struct d3dkmthandle *alloc_handles); - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -+int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, -+ enum d3dkmdt_standardallocationtype t, -+ struct d3dkmdt_gdisurfacedata *data, -+ u32 physical_adapter_index, -+ u32 
*alloc_priv_driver_size, -+ void *prive_alloc_data, -+ u32 *res_priv_data_size, -+ void *priv_res_data); - int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel, - void *command, - u32 cmd_size); -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -162,6 +162,7 @@ int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, - init_rwsem(&adapter->core_lock); - - INIT_LIST_HEAD(&adapter->adapter_process_list_head); -+ init_rwsem(&adapter->shared_resource_list_lock); - adapter->pci_dev = dev; - guid_to_luid(guid, &adapter->luid); - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -111,6 +111,41 @@ static int init_message(struct dxgvmbusmsg *msg, struct dxgadapter *adapter, - return 0; - } - -+static int init_message_res(struct dxgvmbusmsgres *msg, -+ struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ u32 size, -+ u32 result_size) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ bool use_ext_header = dxgglobal->vmbus_ver >= -+ DXGK_VMBUS_INTERFACE_VERSION; -+ -+ if (use_ext_header) -+ size += sizeof(struct dxgvmb_ext_header); -+ msg->size = size; -+ msg->res_size += (result_size + 7) & ~7; -+ size += msg->res_size; -+ msg->hdr = vzalloc(size); -+ if (msg->hdr == NULL) { -+ DXG_ERR("Failed to allocate VM bus message: %d", size); -+ return -ENOMEM; -+ } -+ if (use_ext_header) { -+ msg->msg = (char *)&msg->hdr[1]; -+ msg->hdr->command_offset = sizeof(msg->hdr[0]); -+ msg->hdr->vgpu_luid = adapter->host_vgpu_luid; -+ } else { -+ msg->msg = (char *)msg->hdr; -+ } -+ msg->res = (char *)msg->hdr + msg->size; -+ if (dxgglobal->async_msg_enabled) -+ msg->channel = &dxgglobal->channel; -+ else -+ msg->channel = &adapter->channel; -+ return 0; -+} -+ - static void 
free_message(struct dxgvmbusmsg *msg, struct dxgprocess *process) - { - if (msg->hdr && (char *)msg->hdr != msg->msg_on_stack) -@@ -852,6 +887,620 @@ int dxgvmb_send_destroy_context(struct dxgadapter *adapter, - return ret; - } - -+static int -+copy_private_data(struct d3dkmt_createallocation *args, -+ struct dxgkvmb_command_createallocation *command, -+ struct d3dddi_allocationinfo2 *input_alloc_info, -+ struct d3dkmt_createstandardallocation *standard_alloc) -+{ -+ struct dxgkvmb_command_createallocation_allocinfo *alloc_info; -+ struct d3dddi_allocationinfo2 *input_alloc; -+ int ret = 0; -+ int i; -+ u8 *private_data_dest = (u8 *) &command[1] + -+ (args->alloc_count * -+ sizeof(struct dxgkvmb_command_createallocation_allocinfo)); -+ -+ if (args->private_runtime_data_size) { -+ ret = copy_from_user(private_data_dest, -+ args->private_runtime_data, -+ args->private_runtime_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy runtime data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ private_data_dest += args->private_runtime_data_size; -+ } -+ -+ if (args->flags.standard_allocation) { -+ DXG_TRACE("private data offset %d", -+ (u32) (private_data_dest - (u8 *) command)); -+ -+ args->priv_drv_data_size = sizeof(*args->standard_allocation); -+ memcpy(private_data_dest, standard_alloc, -+ sizeof(*standard_alloc)); -+ private_data_dest += args->priv_drv_data_size; -+ } else if (args->priv_drv_data_size) { -+ ret = copy_from_user(private_data_dest, -+ args->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy private data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ private_data_dest += args->priv_drv_data_size; -+ } -+ -+ alloc_info = (void *)&command[1]; -+ input_alloc = input_alloc_info; -+ if (input_alloc_info[0].sysmem) -+ command->flags.existing_sysmem = 1; -+ for (i = 0; i < args->alloc_count; i++) { -+ alloc_info->flags = input_alloc->flags.value; -+ alloc_info->vidpn_source_id = input_alloc->vidpn_source_id; -+ 
alloc_info->priv_drv_data_size = -+ input_alloc->priv_drv_data_size; -+ if (input_alloc->priv_drv_data_size) { -+ ret = copy_from_user(private_data_dest, -+ input_alloc->priv_drv_data, -+ input_alloc->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ private_data_dest += input_alloc->priv_drv_data_size; -+ } -+ alloc_info++; -+ input_alloc++; -+ } -+ -+cleanup: -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+static -+int create_existing_sysmem(struct dxgdevice *device, -+ struct dxgkvmb_command_allocinfo_return *host_alloc, -+ struct dxgallocation *dxgalloc, -+ bool read_only, -+ const void *sysmem) -+{ -+ int ret1 = 0; -+ void *kmem = NULL; -+ int ret = 0; -+ struct dxgkvmb_command_setexistingsysmemstore *set_store_command; -+ u64 alloc_size = host_alloc->allocation_size; -+ u32 npages = alloc_size >> PAGE_SHIFT; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, device->adapter, device->process, -+ sizeof(*set_store_command)); -+ if (ret) -+ goto cleanup; -+ set_store_command = (void *)msg.msg; -+ -+ /* -+ * Create a guest physical address list and set it as the allocation -+ * backing store in the host. This is done after creating the host -+ * allocation, because only now the allocation size is known. 
-+ */ -+ -+ DXG_TRACE("Alloc size: %lld", alloc_size); -+ -+ dxgalloc->cpu_address = (void *)sysmem; -+ dxgalloc->pages = vzalloc(npages * sizeof(void *)); -+ if (dxgalloc->pages == NULL) { -+ DXG_ERR("failed to allocate pages"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret1 = get_user_pages_fast((unsigned long)sysmem, npages, !read_only, -+ dxgalloc->pages); -+ if (ret1 != npages) { -+ DXG_ERR("get_user_pages_fast failed: %d", ret1); -+ if (ret1 > 0 && ret1 < npages) -+ release_pages(dxgalloc->pages, ret1); -+ vfree(dxgalloc->pages); -+ dxgalloc->pages = NULL; -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ kmem = vmap(dxgalloc->pages, npages, VM_MAP, PAGE_KERNEL); -+ if (kmem == NULL) { -+ DXG_ERR("vmap failed"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret1 = vmbus_establish_gpadl(dxgglobal_get_vmbus(), kmem, -+ alloc_size, &dxgalloc->gpadl); -+ if (ret1) { -+ DXG_ERR("establish_gpadl failed: %d", ret1); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ DXG_TRACE("New gpadl %d", dxgalloc->gpadl.gpadl_handle); -+ -+ command_vgpu_to_host_init2(&set_store_command->hdr, -+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE, -+ device->process->host_handle); -+ set_store_command->device = device->handle; -+ set_store_command->device = device->handle; -+ set_store_command->allocation = host_alloc->allocation; -+#ifdef _MAIN_KERNEL_ -+ set_store_command->gpadl = dxgalloc->gpadl.gpadl_handle; -+#else -+ set_store_command->gpadl = dxgalloc->gpadl; -+#endif -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ if (ret < 0) -+ DXG_ERR("failed to set existing store: %x", ret); -+ -+cleanup: -+ if (kmem) -+ vunmap(kmem); -+ free_message(&msg, device->process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+static int -+process_allocation_handles(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct d3dkmt_createallocation *args, -+ struct dxgkvmb_command_createallocation_return *res, -+ struct dxgallocation **dxgalloc, -+ struct dxgresource 
*resource) -+{ -+ int ret = 0; -+ int i; -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ if (args->flags.create_resource) { -+ ret = hmgrtable_assign_handle(&process->handle_table, resource, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ res->resource); -+ if (ret < 0) { -+ DXG_ERR("failed to assign resource handle %x", -+ res->resource.v); -+ } else { -+ resource->handle = res->resource; -+ resource->handle_valid = 1; -+ } -+ } -+ for (i = 0; i < args->alloc_count; i++) { -+ struct dxgkvmb_command_allocinfo_return *host_alloc; -+ -+ host_alloc = &res->allocation_info[i]; -+ ret = hmgrtable_assign_handle(&process->handle_table, -+ dxgalloc[i], -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ host_alloc->allocation); -+ if (ret < 0) { -+ DXG_ERR("failed assign alloc handle %x %d %d", -+ host_alloc->allocation.v, -+ args->alloc_count, i); -+ break; -+ } -+ dxgalloc[i]->alloc_handle = host_alloc->allocation; -+ dxgalloc[i]->handle_valid = 1; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+static int -+create_local_allocations(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct d3dkmt_createallocation *args, -+ struct d3dkmt_createallocation *__user input_args, -+ struct d3dddi_allocationinfo2 *alloc_info, -+ struct dxgkvmb_command_createallocation_return *result, -+ struct dxgresource *resource, -+ struct dxgallocation **dxgalloc, -+ u32 destroy_buffer_size) -+{ -+ int i; -+ int alloc_count = args->alloc_count; -+ u8 *alloc_private_data = NULL; -+ int ret = 0; -+ int ret1; -+ struct dxgkvmb_command_destroyallocation *destroy_buf; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, device->adapter, process, -+ destroy_buffer_size); -+ if (ret) -+ goto cleanup; -+ destroy_buf = (void *)msg.msg; -+ -+ /* Prepare the command to destroy allocation in case of failure */ -+ command_vgpu_to_host_init2(&destroy_buf->hdr, -+ DXGK_VMBCOMMAND_DESTROYALLOCATION, -+ 
process->host_handle); -+ destroy_buf->device = args->device; -+ destroy_buf->resource = args->resource; -+ destroy_buf->alloc_count = alloc_count; -+ destroy_buf->flags.assume_not_in_use = 1; -+ for (i = 0; i < alloc_count; i++) { -+ DXG_TRACE("host allocation: %d %x", -+ i, result->allocation_info[i].allocation.v); -+ destroy_buf->allocations[i] = -+ result->allocation_info[i].allocation; -+ } -+ -+ if (args->flags.create_resource) { -+ DXG_TRACE("new resource: %x", result->resource.v); -+ ret = copy_to_user(&input_args->resource, &result->resource, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy resource handle"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ alloc_private_data = (u8 *) result + -+ sizeof(struct dxgkvmb_command_createallocation_return) + -+ sizeof(struct dxgkvmb_command_allocinfo_return) * (alloc_count - 1); -+ -+ for (i = 0; i < alloc_count; i++) { -+ struct dxgkvmb_command_allocinfo_return *host_alloc; -+ struct d3dddi_allocationinfo2 *user_alloc; -+ -+ host_alloc = &result->allocation_info[i]; -+ user_alloc = &alloc_info[i]; -+ dxgalloc[i]->num_pages = -+ host_alloc->allocation_size >> PAGE_SHIFT; -+ if (user_alloc->sysmem) { -+ ret = create_existing_sysmem(device, host_alloc, -+ dxgalloc[i], -+ args->flags.read_only != 0, -+ user_alloc->sysmem); -+ if (ret < 0) -+ goto cleanup; -+ } -+ dxgalloc[i]->cached = host_alloc->allocation_flags.cached; -+ if (host_alloc->priv_drv_data_size) { -+ ret = copy_to_user(user_alloc->priv_drv_data, -+ alloc_private_data, -+ host_alloc->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy private data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ alloc_private_data += host_alloc->priv_drv_data_size; -+ } -+ ret = copy_to_user(&args->allocation_info[i].allocation, -+ &host_alloc->allocation, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handle"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ ret = process_allocation_handles(process, 
device, args, result, -+ dxgalloc, resource); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(&input_args->global_share, &args->global_share, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy global share"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ /* Free local handles before freeing the handles in the host */ -+ dxgdevice_acquire_alloc_list_lock(device); -+ if (dxgalloc) -+ for (i = 0; i < alloc_count; i++) -+ if (dxgalloc[i]) -+ dxgallocation_free_handle(dxgalloc[i]); -+ if (resource && args->flags.create_resource) -+ dxgresource_free_handle(resource); -+ dxgdevice_release_alloc_list_lock(device); -+ -+ /* Destroy allocations in the host to unmap gpadls */ -+ ret1 = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, -+ msg.size); -+ if (ret1 < 0) -+ DXG_ERR("failed to destroy allocations: %x", -+ ret1); -+ -+ dxgdevice_acquire_alloc_list_lock(device); -+ if (dxgalloc) { -+ for (i = 0; i < alloc_count; i++) { -+ if (dxgalloc[i]) { -+ dxgalloc[i]->alloc_handle.v = 0; -+ dxgallocation_destroy(dxgalloc[i]); -+ dxgalloc[i] = NULL; -+ } -+ } -+ } -+ if (resource && args->flags.create_resource) { -+ /* -+ * Prevent the resource memory from freeing. -+ * It will be freed in the top level function. 
-+ */ -+ kref_get(&resource->resource_kref); -+ dxgresource_destroy(resource); -+ } -+ dxgdevice_release_alloc_list_lock(device); -+ } -+ -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_create_allocation(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct d3dkmt_createallocation *args, -+ struct d3dkmt_createallocation *__user -+ input_args, -+ struct dxgresource *resource, -+ struct dxgallocation **dxgalloc, -+ struct d3dddi_allocationinfo2 *alloc_info, -+ struct d3dkmt_createstandardallocation -+ *standard_alloc) -+{ -+ struct dxgkvmb_command_createallocation *command = NULL; -+ struct dxgkvmb_command_createallocation_return *result = NULL; -+ int ret = -EINVAL; -+ int i; -+ u32 result_size = 0; -+ u32 cmd_size = 0; -+ u32 destroy_buffer_size = 0; -+ u32 priv_drv_data_size; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ if (args->private_runtime_data_size >= DXG_MAX_VM_BUS_PACKET_SIZE || -+ args->priv_drv_data_size >= DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EOVERFLOW; -+ goto cleanup; -+ } -+ -+ /* -+ * Preallocate the buffer, which will be used for destruction in case -+ * of a failure -+ */ -+ destroy_buffer_size = sizeof(struct dxgkvmb_command_destroyallocation) + -+ args->alloc_count * sizeof(struct d3dkmthandle); -+ -+ /* Compute the total private driver size */ -+ -+ priv_drv_data_size = 0; -+ -+ for (i = 0; i < args->alloc_count; i++) { -+ if (alloc_info[i].priv_drv_data_size >= -+ DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EOVERFLOW; -+ goto cleanup; -+ } else { -+ priv_drv_data_size += alloc_info[i].priv_drv_data_size; -+ } -+ if (priv_drv_data_size >= DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EOVERFLOW; -+ goto cleanup; -+ } -+ } -+ -+ /* -+ * Private driver data for the result includes only per allocation -+ * private data -+ */ -+ result_size = sizeof(struct dxgkvmb_command_createallocation_return) + -+ (args->alloc_count - 1) * -+ sizeof(struct 
dxgkvmb_command_allocinfo_return) + -+ priv_drv_data_size; -+ result = vzalloc(result_size); -+ if (result == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ /* Private drv data for the command includes the global private data */ -+ priv_drv_data_size += args->priv_drv_data_size; -+ -+ cmd_size = sizeof(struct dxgkvmb_command_createallocation) + -+ args->alloc_count * -+ sizeof(struct dxgkvmb_command_createallocation_allocinfo) + -+ args->private_runtime_data_size + priv_drv_data_size; -+ if (cmd_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EOVERFLOW; -+ goto cleanup; -+ } -+ -+ DXG_TRACE("command size, driver_data_size %d %d %ld %ld", -+ cmd_size, priv_drv_data_size, -+ sizeof(struct dxgkvmb_command_createallocation), -+ sizeof(struct dxgkvmb_command_createallocation_allocinfo)); -+ -+ ret = init_message(&msg, device->adapter, process, -+ cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CREATEALLOCATION, -+ process->host_handle); -+ command->device = args->device; -+ command->flags = args->flags; -+ command->resource = args->resource; -+ command->private_runtime_resource_handle = -+ args->private_runtime_resource_handle; -+ command->alloc_count = args->alloc_count; -+ command->private_runtime_data_size = args->private_runtime_data_size; -+ command->priv_drv_data_size = args->priv_drv_data_size; -+ -+ ret = copy_private_data(args, command, alloc_info, standard_alloc); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ result, result_size); -+ if (ret < 0) { -+ DXG_ERR("send_create_allocation failed %x", ret); -+ goto cleanup; -+ } -+ -+ ret = create_local_allocations(process, device, args, input_args, -+ alloc_info, result, resource, dxgalloc, -+ destroy_buffer_size); -+cleanup: -+ -+ if (result) -+ vfree(result); -+ free_message(&msg, process); -+ -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int 
dxgvmb_send_destroy_allocation(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct d3dkmt_destroyallocation2 *args, -+ struct d3dkmthandle *alloc_handles) -+{ -+ struct dxgkvmb_command_destroyallocation *destroy_buffer; -+ u32 destroy_buffer_size; -+ int ret; -+ int allocations_size = args->alloc_count * sizeof(struct d3dkmthandle); -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ destroy_buffer_size = sizeof(struct dxgkvmb_command_destroyallocation) + -+ allocations_size; -+ -+ ret = init_message(&msg, device->adapter, process, -+ destroy_buffer_size); -+ if (ret) -+ goto cleanup; -+ destroy_buffer = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&destroy_buffer->hdr, -+ DXGK_VMBCOMMAND_DESTROYALLOCATION, -+ process->host_handle); -+ destroy_buffer->device = args->device; -+ destroy_buffer->resource = args->resource; -+ destroy_buffer->alloc_count = args->alloc_count; -+ destroy_buffer->flags = args->flags; -+ if (allocations_size) -+ memcpy(destroy_buffer->allocations, alloc_handles, -+ allocations_size); -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, -+ enum d3dkmdt_standardallocationtype alloctype, -+ struct d3dkmdt_gdisurfacedata *alloc_data, -+ u32 physical_adapter_index, -+ u32 *alloc_priv_driver_size, -+ void *priv_alloc_data, -+ u32 *res_priv_data_size, -+ void *priv_res_data) -+{ -+ struct dxgkvmb_command_getstandardallocprivdata *command; -+ struct dxgkvmb_command_getstandardallocprivdata_return *result = NULL; -+ u32 result_size = sizeof(*result); -+ int ret; -+ struct dxgvmbusmsgres msg = {.hdr = NULL}; -+ -+ if (priv_alloc_data) -+ result_size += *alloc_priv_driver_size; -+ if (priv_res_data) -+ result_size += *res_priv_data_size; -+ ret = init_message_res(&msg, device->adapter, device->process, -+ sizeof(*command), result_size); -+ 
if (ret) -+ goto cleanup; -+ command = msg.msg; -+ result = msg.res; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_DDIGETSTANDARDALLOCATIONDRIVERDATA, -+ device->process->host_handle); -+ -+ command->alloc_type = alloctype; -+ command->priv_driver_data_size = *alloc_priv_driver_size; -+ command->physical_adapter_index = physical_adapter_index; -+ command->priv_driver_resource_size = *res_priv_data_size; -+ switch (alloctype) { -+ case _D3DKMDT_STANDARDALLOCATION_GDISURFACE: -+ command->gdi_surface = *alloc_data; -+ break; -+ case _D3DKMDT_STANDARDALLOCATION_SHAREDPRIMARYSURFACE: -+ case _D3DKMDT_STANDARDALLOCATION_SHADOWSURFACE: -+ case _D3DKMDT_STANDARDALLOCATION_STAGINGSURFACE: -+ default: -+ DXG_ERR("Invalid standard alloc type"); -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ result, msg.res_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result->status); -+ if (ret < 0) -+ goto cleanup; -+ -+ if (*alloc_priv_driver_size && -+ result->priv_driver_data_size != *alloc_priv_driver_size) { -+ DXG_ERR("Priv data size mismatch"); -+ goto cleanup; -+ } -+ if (*res_priv_data_size && -+ result->priv_driver_resource_size != *res_priv_data_size) { -+ DXG_ERR("Resource priv data size mismatch"); -+ goto cleanup; -+ } -+ *alloc_priv_driver_size = result->priv_driver_data_size; -+ *res_priv_data_size = result->priv_driver_resource_size; -+ if (priv_alloc_data) { -+ memcpy(priv_alloc_data, &result[1], -+ result->priv_driver_data_size); -+ } -+ if (priv_res_data) { -+ memcpy(priv_res_data, -+ (char *)(&result[1]) + result->priv_driver_data_size, -+ result->priv_driver_resource_size); -+ } -+ -+cleanup: -+ -+ free_message((struct dxgvmbusmsg *)&msg, device->process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args) -diff --git 
a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -173,6 +173,14 @@ struct dxgkvmb_command_setiospaceregion { - u32 shared_page_gpadl; - }; - -+/* Returns ntstatus */ -+struct dxgkvmb_command_setexistingsysmemstore { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle allocation; -+ u32 gpadl; -+}; -+ - struct dxgkvmb_command_createprocess { - struct dxgkvmb_command_vm_to_host hdr; - void *process; -@@ -269,6 +277,121 @@ struct dxgkvmb_command_flushdevice { - enum dxgdevice_flushschedulerreason reason; - }; - -+struct dxgkvmb_command_createallocation_allocinfo { -+ u32 flags; -+ u32 priv_drv_data_size; -+ u32 vidpn_source_id; -+}; -+ -+struct dxgkvmb_command_createallocation { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+ u32 private_runtime_data_size; -+ u32 priv_drv_data_size; -+ u32 alloc_count; -+ struct d3dkmt_createallocationflags flags; -+ u64 private_runtime_resource_handle; -+ bool make_resident; -+/* dxgkvmb_command_createallocation_allocinfo alloc_info[alloc_count]; */ -+/* u8 private_rutime_data[private_runtime_data_size] */ -+/* u8 priv_drv_data[] for each alloc_info */ -+}; -+ -+struct dxgkvmb_command_getstandardallocprivdata { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ enum d3dkmdt_standardallocationtype alloc_type; -+ u32 priv_driver_data_size; -+ u32 priv_driver_resource_size; -+ u32 physical_adapter_index; -+ union { -+ struct d3dkmdt_sharedprimarysurfacedata primary; -+ struct d3dkmdt_shadowsurfacedata shadow; -+ struct d3dkmdt_stagingsurfacedata staging; -+ struct d3dkmdt_gdisurfacedata gdi_surface; -+ }; -+}; -+ -+struct dxgkvmb_command_getstandardallocprivdata_return { -+ struct ntstatus status; -+ u32 priv_driver_data_size; -+ u32 priv_driver_resource_size; -+ union { -+ struct 
d3dkmdt_sharedprimarysurfacedata primary; -+ struct d3dkmdt_shadowsurfacedata shadow; -+ struct d3dkmdt_stagingsurfacedata staging; -+ struct d3dkmdt_gdisurfacedata gdi_surface; -+ }; -+/* char alloc_priv_data[priv_driver_data_size]; */ -+/* char resource_priv_data[priv_driver_resource_size]; */ -+}; -+ -+struct dxgkarg_describeallocation { -+ u64 allocation; -+ u32 width; -+ u32 height; -+ u32 format; -+ u32 multisample_method; -+ struct d3dddi_rational refresh_rate; -+ u32 private_driver_attribute; -+ u32 flags; -+ u32 rotation; -+}; -+ -+struct dxgkvmb_allocflags { -+ union { -+ u32 flags; -+ struct { -+ u32 primary:1; -+ u32 cdd_primary:1; -+ u32 dod_primary:1; -+ u32 overlay:1; -+ u32 reserved6:1; -+ u32 capture:1; -+ u32 reserved0:4; -+ u32 reserved1:1; -+ u32 existing_sysmem:1; -+ u32 stereo:1; -+ u32 direct_flip:1; -+ u32 hardware_protected:1; -+ u32 reserved2:1; -+ u32 reserved3:1; -+ u32 reserved4:1; -+ u32 protected:1; -+ u32 cached:1; -+ u32 independent_primary:1; -+ u32 reserved:11; -+ }; -+ }; -+}; -+ -+struct dxgkvmb_command_allocinfo_return { -+ struct d3dkmthandle allocation; -+ u32 priv_drv_data_size; -+ struct dxgkvmb_allocflags allocation_flags; -+ u64 allocation_size; -+ struct dxgkarg_describeallocation driver_info; -+}; -+ -+struct dxgkvmb_command_createallocation_return { -+ struct d3dkmt_createallocationflags flags; -+ struct d3dkmthandle resource; -+ struct d3dkmthandle global_share; -+ u32 vgpu_flags; -+ struct dxgkvmb_command_allocinfo_return allocation_info[1]; -+ /* Private driver data for allocations */ -+}; -+ -+/* The command returns ntstatus */ -+struct dxgkvmb_command_destroyallocation { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+ u32 alloc_count; -+ struct d3dddicb_destroyallocation2flags flags; -+ struct d3dkmthandle allocations[1]; -+}; -+ - struct dxgkvmb_command_createcontextvirtual { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmthandle context; 
-diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -714,6 +714,633 @@ dxgkio_destroy_context(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+get_standard_alloc_priv_data(struct dxgdevice *device, -+ struct d3dkmt_createstandardallocation *alloc_info, -+ u32 *standard_alloc_priv_data_size, -+ void **standard_alloc_priv_data, -+ u32 *standard_res_priv_data_size, -+ void **standard_res_priv_data) -+{ -+ int ret; -+ struct d3dkmdt_gdisurfacedata gdi_data = { }; -+ u32 priv_data_size = 0; -+ u32 res_priv_data_size = 0; -+ void *priv_data = NULL; -+ void *res_priv_data = NULL; -+ -+ gdi_data.type = _D3DKMDT_GDISURFACE_TEXTURE_CROSSADAPTER; -+ gdi_data.width = alloc_info->existing_heap_data.size; -+ gdi_data.height = 1; -+ gdi_data.format = _D3DDDIFMT_UNKNOWN; -+ -+ *standard_alloc_priv_data_size = 0; -+ ret = dxgvmb_send_get_stdalloc_data(device, -+ _D3DKMDT_STANDARDALLOCATION_GDISURFACE, -+ &gdi_data, 0, -+ &priv_data_size, NULL, -+ &res_priv_data_size, -+ NULL); -+ if (ret < 0) -+ goto cleanup; -+ DXG_TRACE("Priv data size: %d", priv_data_size); -+ if (priv_data_size == 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ priv_data = vzalloc(priv_data_size); -+ if (priv_data == NULL) { -+ ret = -ENOMEM; -+ DXG_ERR("failed to allocate memory for priv data: %d", -+ priv_data_size); -+ goto cleanup; -+ } -+ if (res_priv_data_size) { -+ res_priv_data = vzalloc(res_priv_data_size); -+ if (res_priv_data == NULL) { -+ ret = -ENOMEM; -+ dev_err(DXGDEV, -+ "failed to alloc memory for res priv data: %d", -+ res_priv_data_size); -+ goto cleanup; -+ } -+ } -+ ret = dxgvmb_send_get_stdalloc_data(device, -+ _D3DKMDT_STANDARDALLOCATION_GDISURFACE, -+ &gdi_data, 0, -+ &priv_data_size, -+ priv_data, -+ &res_priv_data_size, -+ res_priv_data); -+ if (ret < 0) -+ goto cleanup; -+ *standard_alloc_priv_data_size = priv_data_size; 
-+ *standard_alloc_priv_data = priv_data; -+ *standard_res_priv_data_size = res_priv_data_size; -+ *standard_res_priv_data = res_priv_data; -+ priv_data = NULL; -+ res_priv_data = NULL; -+ -+cleanup: -+ if (priv_data) -+ vfree(priv_data); -+ if (res_priv_data) -+ vfree(res_priv_data); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+static int -+dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_createallocation args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ struct d3dddi_allocationinfo2 *alloc_info = NULL; -+ struct d3dkmt_createstandardallocation standard_alloc; -+ u32 alloc_info_size = 0; -+ struct dxgresource *resource = NULL; -+ struct dxgallocation **dxgalloc = NULL; -+ bool resource_mutex_acquired = false; -+ u32 standard_alloc_priv_data_size = 0; -+ void *standard_alloc_priv_data = NULL; -+ u32 res_priv_data_size = 0; -+ void *res_priv_data = NULL; -+ int i; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.alloc_count > D3DKMT_CREATEALLOCATION_MAX || -+ args.alloc_count == 0) { -+ DXG_ERR("invalid number of allocations to create"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ alloc_info_size = sizeof(struct d3dddi_allocationinfo2) * -+ args.alloc_count; -+ alloc_info = vzalloc(alloc_info_size); -+ if (alloc_info == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(alloc_info, args.allocation_info, -+ alloc_info_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc info"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ for (i = 0; i < args.alloc_count; i++) { -+ if (args.flags.standard_allocation) { -+ if (alloc_info[i].priv_drv_data_size != 0) { -+ DXG_ERR("private data size not zero"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ if (alloc_info[i].priv_drv_data_size >= -+ DXG_MAX_VM_BUS_PACKET_SIZE) { -+ 
DXG_ERR("private data size too big: %d %d %ld", -+ i, alloc_info[i].priv_drv_data_size, -+ sizeof(alloc_info[0])); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ if (args.flags.existing_section || args.flags.create_protected) { -+ DXG_ERR("invalid allocation flags"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.flags.standard_allocation) { -+ if (args.standard_allocation == NULL) { -+ DXG_ERR("invalid standard allocation"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_from_user(&standard_alloc, -+ args.standard_allocation, -+ sizeof(standard_alloc)); -+ if (ret) { -+ DXG_ERR("failed to copy std alloc data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (standard_alloc.type == -+ _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP) { -+ if (alloc_info[0].sysmem == NULL || -+ (unsigned long)alloc_info[0].sysmem & -+ (PAGE_SIZE - 1)) { -+ DXG_ERR("invalid sysmem pointer"); -+ ret = STATUS_INVALID_PARAMETER; -+ goto cleanup; -+ } -+ if (!args.flags.existing_sysmem) { -+ DXG_ERR("expect existing_sysmem flag"); -+ ret = STATUS_INVALID_PARAMETER; -+ goto cleanup; -+ } -+ } else if (standard_alloc.type == -+ _D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER) { -+ if (args.flags.existing_sysmem) { -+ DXG_ERR("existing_sysmem flag invalid"); -+ ret = STATUS_INVALID_PARAMETER; -+ goto cleanup; -+ -+ } -+ if (alloc_info[0].sysmem != NULL) { -+ DXG_ERR("sysmem should be NULL"); -+ ret = STATUS_INVALID_PARAMETER; -+ goto cleanup; -+ } -+ } else { -+ DXG_ERR("invalid standard allocation type"); -+ ret = STATUS_INVALID_PARAMETER; -+ goto cleanup; -+ } -+ -+ if (args.priv_drv_data_size != 0 || -+ args.alloc_count != 1 || -+ standard_alloc.existing_heap_data.size == 0 || -+ standard_alloc.existing_heap_data.size & (PAGE_SIZE - 1)) { -+ DXG_ERR("invalid standard allocation"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ args.priv_drv_data_size = -+ sizeof(struct d3dkmt_createstandardallocation); -+ } -+ -+ if (args.flags.create_shared && !args.flags.create_resource) { -+ 
DXG_ERR("create_resource must be set for create_shared"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ if (args.flags.standard_allocation) { -+ ret = get_standard_alloc_priv_data(device, -+ &standard_alloc, -+ &standard_alloc_priv_data_size, -+ &standard_alloc_priv_data, -+ &res_priv_data_size, -+ &res_priv_data); -+ if (ret < 0) -+ goto cleanup; -+ DXG_TRACE("Alloc private data: %d", -+ standard_alloc_priv_data_size); -+ } -+ -+ if (args.flags.create_resource) { -+ resource = dxgresource_create(device); -+ if (resource == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ resource->private_runtime_handle = -+ args.private_runtime_resource_handle; -+ } else { -+ if (args.resource.v) { -+ /* Adding new allocations to the given resource */ -+ -+ dxgprocess_ht_lock_shared_down(process); -+ resource = hmgrtable_get_object_by_type( -+ &process->handle_table, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ args.resource); -+ kref_get(&resource->resource_kref); -+ dxgprocess_ht_lock_shared_up(process); -+ -+ if (resource == NULL || resource->device != device) { -+ DXG_ERR("invalid resource handle %x", -+ args.resource.v); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ /* Synchronize with resource destruction */ -+ mutex_lock(&resource->resource_mutex); -+ if (!dxgresource_is_active(resource)) { -+ mutex_unlock(&resource->resource_mutex); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ resource_mutex_acquired = true; -+ } -+ } 
-+ -+ dxgalloc = vzalloc(sizeof(struct dxgallocation *) * args.alloc_count); -+ if (dxgalloc == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ for (i = 0; i < args.alloc_count; i++) { -+ struct dxgallocation *alloc; -+ u32 priv_data_size; -+ -+ if (args.flags.standard_allocation) -+ priv_data_size = standard_alloc_priv_data_size; -+ else -+ priv_data_size = alloc_info[i].priv_drv_data_size; -+ -+ if (alloc_info[i].sysmem && !args.flags.standard_allocation) { -+ if ((unsigned long) -+ alloc_info[i].sysmem & (PAGE_SIZE - 1)) { -+ DXG_ERR("invalid sysmem alloc %d, %p", -+ i, alloc_info[i].sysmem); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ if ((alloc_info[0].sysmem == NULL) != -+ (alloc_info[i].sysmem == NULL)) { -+ DXG_ERR("All allocs must have sysmem pointer"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ dxgalloc[i] = dxgallocation_create(process); -+ if (dxgalloc[i] == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ alloc = dxgalloc[i]; -+ -+ if (resource) { -+ ret = dxgresource_add_alloc(resource, alloc); -+ if (ret < 0) -+ goto cleanup; -+ } else { -+ dxgdevice_add_alloc(device, alloc); -+ } -+ if (args.flags.create_shared) { -+ /* Remember alloc private data to use it during open */ -+ alloc->priv_drv_data = vzalloc(priv_data_size + -+ offsetof(struct privdata, data)); -+ if (alloc->priv_drv_data == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ if (args.flags.standard_allocation) { -+ memcpy(alloc->priv_drv_data->data, -+ standard_alloc_priv_data, -+ priv_data_size); -+ } else { -+ ret = copy_from_user( -+ alloc->priv_drv_data->data, -+ alloc_info[i].priv_drv_data, -+ priv_data_size); -+ if (ret) { -+ dev_err(DXGDEV, -+ "failed to copy priv data"); -+ ret = -EFAULT; -+ goto cleanup; -+ } -+ } -+ alloc->priv_drv_data->data_size = priv_data_size; -+ } -+ } -+ -+ ret = dxgvmb_send_create_allocation(process, device, &args, inargs, -+ resource, dxgalloc, alloc_info, -+ &standard_alloc); -+cleanup: -+ -+ if (resource_mutex_acquired) { -+ 
mutex_unlock(&resource->resource_mutex); -+ kref_put(&resource->resource_kref, dxgresource_release); -+ } -+ if (ret < 0) { -+ if (dxgalloc) { -+ for (i = 0; i < args.alloc_count; i++) { -+ if (dxgalloc[i]) -+ dxgallocation_destroy(dxgalloc[i]); -+ } -+ } -+ if (resource && args.flags.create_resource) { -+ dxgresource_destroy(resource); -+ } -+ } -+ if (dxgalloc) -+ vfree(dxgalloc); -+ if (standard_alloc_priv_data) -+ vfree(standard_alloc_priv_data); -+ if (res_priv_data) -+ vfree(res_priv_data); -+ if (alloc_info) -+ vfree(alloc_info); -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) { -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int validate_alloc(struct dxgallocation *alloc0, -+ struct dxgallocation *alloc, -+ struct dxgdevice *device, -+ struct d3dkmthandle alloc_handle) -+{ -+ u32 fail_reason; -+ -+ if (alloc == NULL) { -+ fail_reason = 1; -+ goto cleanup; -+ } -+ if (alloc->resource_owner != alloc0->resource_owner) { -+ fail_reason = 2; -+ goto cleanup; -+ } -+ if (alloc->resource_owner) { -+ if (alloc->owner.resource != alloc0->owner.resource) { -+ fail_reason = 3; -+ goto cleanup; -+ } -+ if (alloc->owner.resource->device != device) { -+ fail_reason = 4; -+ goto cleanup; -+ } -+ } else { -+ if (alloc->owner.device != device) { -+ fail_reason = 6; -+ goto cleanup; -+ } -+ } -+ return 0; -+cleanup: -+ DXG_ERR("Alloc validation failed: reason: %d %x", -+ fail_reason, alloc_handle.v); -+ return -EINVAL; -+} -+ -+static int -+dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_destroyallocation2 args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int ret; -+ struct d3dkmthandle *alloc_handles = NULL; -+ struct dxgallocation **allocs = NULL; -+ struct dxgresource *resource = NULL; -+ int i; -+ -+ ret = 
copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.alloc_count > D3DKMT_CREATEALLOCATION_MAX || -+ ((args.alloc_count == 0) == (args.resource.v == 0))) { -+ DXG_ERR("invalid number of allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.alloc_count) { -+ u32 handle_size = sizeof(struct d3dkmthandle) * -+ args.alloc_count; -+ -+ alloc_handles = vzalloc(handle_size); -+ if (alloc_handles == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ allocs = vzalloc(sizeof(struct dxgallocation *) * -+ args.alloc_count); -+ if (allocs == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(alloc_handles, args.allocations, -+ handle_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* Acquire the device lock to synchronize with the device destriction */ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ /* -+ * Destroy the local allocation handles first. If the host handle -+ * is destroyed first, another object could be assigned to the process -+ * table at the same place as the allocation handle and it will fail. 
-+ */ -+ if (args.alloc_count) { -+ dxgprocess_ht_lock_exclusive_down(process); -+ for (i = 0; i < args.alloc_count; i++) { -+ allocs[i] = -+ hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ alloc_handles[i]); -+ ret = -+ validate_alloc(allocs[0], allocs[i], device, -+ alloc_handles[i]); -+ if (ret < 0) { -+ dxgprocess_ht_lock_exclusive_up(process); -+ goto cleanup; -+ } -+ } -+ dxgprocess_ht_lock_exclusive_up(process); -+ for (i = 0; i < args.alloc_count; i++) -+ dxgallocation_free_handle(allocs[i]); -+ } else { -+ struct dxgallocation *alloc; -+ -+ dxgprocess_ht_lock_exclusive_down(process); -+ resource = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ args.resource); -+ if (resource == NULL) { -+ DXG_ERR("Invalid resource handle: %x", -+ args.resource.v); -+ ret = -EINVAL; -+ } else if (resource->device != device) { -+ DXG_ERR("Resource belongs to wrong device: %x", -+ args.resource.v); -+ ret = -EINVAL; -+ } else { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ args.resource); -+ resource->object_state = DXGOBJECTSTATE_DESTROYED; -+ resource->handle.v = 0; -+ resource->handle_valid = 0; -+ } -+ dxgprocess_ht_lock_exclusive_up(process); -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ dxgdevice_acquire_alloc_list_lock_shared(device); -+ list_for_each_entry(alloc, &resource->alloc_list_head, -+ alloc_list_entry) { -+ dxgallocation_free_handle(alloc); -+ } -+ dxgdevice_release_alloc_list_lock_shared(device); -+ } -+ -+ if (args.alloc_count && allocs[0]->resource_owner) -+ resource = allocs[0]->owner.resource; -+ -+ if (resource) { -+ kref_get(&resource->resource_kref); -+ mutex_lock(&resource->resource_mutex); -+ } -+ -+ ret = dxgvmb_send_destroy_allocation(process, device, &args, -+ alloc_handles); -+ -+ /* -+ * Destroy the allocations after the host destroyed it. -+ * The allocation gpadl teardown will wait until the host unmaps its -+ * gpadl. 
-+ */ -+ dxgdevice_acquire_alloc_list_lock(device); -+ if (args.alloc_count) { -+ for (i = 0; i < args.alloc_count; i++) { -+ if (allocs[i]) { -+ allocs[i]->alloc_handle.v = 0; -+ dxgallocation_destroy(allocs[i]); -+ } -+ } -+ } else { -+ dxgresource_destroy(resource); -+ } -+ dxgdevice_release_alloc_list_lock(device); -+ -+ if (resource) { -+ mutex_unlock(&resource->resource_mutex); -+ kref_put(&resource->resource_kref, dxgresource_release); -+ } -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) { -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ -+ if (alloc_handles) -+ vfree(alloc_handles); -+ -+ if (allocs) -+ vfree(allocs); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, -@@ -721,7 +1348,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x03 */ {}, - /* 0x04 */ {dxgkio_create_context_virtual, LX_DXCREATECONTEXTVIRTUAL}, - /* 0x05 */ {dxgkio_destroy_context, LX_DXDESTROYCONTEXT}, --/* 0x06 */ {}, -+/* 0x06 */ {dxgkio_create_allocation, LX_DXCREATEALLOCATION}, - /* 0x07 */ {}, - /* 0x08 */ {}, - /* 0x09 */ {dxgkio_query_adapter_info, LX_DXQUERYADAPTERINFO}, -@@ -734,7 +1361,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x10 */ {}, - /* 0x11 */ {}, - /* 0x12 */ {}, --/* 0x13 */ {}, -+/* 0x13 */ {dxgkio_destroy_allocation, LX_DXDESTROYALLOCATION2}, - /* 0x14 */ {dxgkio_enum_adapters, LX_DXENUMADAPTERS2}, - /* 0x15 */ {dxgkio_close_adapter, LX_DXCLOSEADAPTER}, - /* 0x16 */ {}, -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -30,6 +30,9 @@ extern const struct d3dkmthandle zerohandle; - * plistmutex (process list mutex) - * table_lock (handle table lock) - * context_list_lock -+ * 
alloc_list_lock -+ * resource_mutex -+ * shared_resource_list_lock - * core_lock (dxgadapter lock) - * device_lock (dxgdevice lock) - * process_adapter_mutex -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -58,6 +58,7 @@ struct winluid { - __u32 b; - }; - -+#define D3DKMT_CREATEALLOCATION_MAX 1024 - #define D3DKMT_ADAPTERS_MAX 64 - - struct d3dkmt_adapterinfo { -@@ -197,6 +198,205 @@ struct d3dkmt_createcontextvirtual { - struct d3dkmthandle context; - }; - -+enum d3dkmdt_gdisurfacetype { -+ _D3DKMDT_GDISURFACE_INVALID = 0, -+ _D3DKMDT_GDISURFACE_TEXTURE = 1, -+ _D3DKMDT_GDISURFACE_STAGING_CPUVISIBLE = 2, -+ _D3DKMDT_GDISURFACE_STAGING = 3, -+ _D3DKMDT_GDISURFACE_LOOKUPTABLE = 4, -+ _D3DKMDT_GDISURFACE_EXISTINGSYSMEM = 5, -+ _D3DKMDT_GDISURFACE_TEXTURE_CPUVISIBLE = 6, -+ _D3DKMDT_GDISURFACE_TEXTURE_CROSSADAPTER = 7, -+ _D3DKMDT_GDISURFACE_TEXTURE_CPUVISIBLE_CROSSADAPTER = 8, -+}; -+ -+struct d3dddi_rational { -+ __u32 numerator; -+ __u32 denominator; -+}; -+ -+enum d3dddiformat { -+ _D3DDDIFMT_UNKNOWN = 0, -+}; -+ -+struct d3dkmdt_gdisurfacedata { -+ __u32 width; -+ __u32 height; -+ __u32 format; -+ enum d3dkmdt_gdisurfacetype type; -+ __u32 flags; -+ __u32 pitch; -+}; -+ -+struct d3dkmdt_stagingsurfacedata { -+ __u32 width; -+ __u32 height; -+ __u32 pitch; -+}; -+ -+struct d3dkmdt_sharedprimarysurfacedata { -+ __u32 width; -+ __u32 height; -+ enum d3dddiformat format; -+ struct d3dddi_rational refresh_rate; -+ __u32 vidpn_source_id; -+}; -+ -+struct d3dkmdt_shadowsurfacedata { -+ __u32 width; -+ __u32 height; -+ enum d3dddiformat format; -+ __u32 pitch; -+}; -+ -+enum d3dkmdt_standardallocationtype { -+ _D3DKMDT_STANDARDALLOCATION_SHAREDPRIMARYSURFACE = 1, -+ _D3DKMDT_STANDARDALLOCATION_SHADOWSURFACE = 2, -+ _D3DKMDT_STANDARDALLOCATION_STAGINGSURFACE = 3, -+ _D3DKMDT_STANDARDALLOCATION_GDISURFACE = 4, -+}; -+ -+enum 
d3dkmt_standardallocationtype { -+ _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP = 1, -+ _D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER = 2, -+}; -+ -+struct d3dkmt_standardallocation_existingheap { -+ __u64 size; -+}; -+ -+struct d3dkmt_createstandardallocationflags { -+ union { -+ struct { -+ __u32 reserved:32; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_createstandardallocation { -+ enum d3dkmt_standardallocationtype type; -+ __u32 reserved; -+ struct d3dkmt_standardallocation_existingheap existing_heap_data; -+ struct d3dkmt_createstandardallocationflags flags; -+ __u32 reserved1; -+}; -+ -+struct d3dddi_allocationinfo2 { -+ struct d3dkmthandle allocation; -+#ifdef __KERNEL__ -+ const void *sysmem; -+#else -+ __u64 sysmem; -+#endif -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ __u32 priv_drv_data_size; -+ __u32 vidpn_source_id; -+ union { -+ struct { -+ __u32 primary:1; -+ __u32 stereo:1; -+ __u32 override_priority:1; -+ __u32 reserved:29; -+ }; -+ __u32 value; -+ } flags; -+ __u64 gpu_virtual_address; -+ union { -+ __u32 priority; -+ __u64 unused; -+ }; -+ __u64 reserved[5]; -+}; -+ -+struct d3dkmt_createallocationflags { -+ union { -+ struct { -+ __u32 create_resource:1; -+ __u32 create_shared:1; -+ __u32 non_secure:1; -+ __u32 create_protected:1; -+ __u32 restrict_shared_access:1; -+ __u32 existing_sysmem:1; -+ __u32 nt_security_sharing:1; -+ __u32 read_only:1; -+ __u32 create_write_combined:1; -+ __u32 create_cached:1; -+ __u32 swap_chain_back_buffer:1; -+ __u32 cross_adapter:1; -+ __u32 open_cross_adapter:1; -+ __u32 partial_shared_creation:1; -+ __u32 zeroed:1; -+ __u32 write_watch:1; -+ __u32 standard_allocation:1; -+ __u32 existing_section:1; -+ __u32 reserved:14; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_createallocation { -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+ struct d3dkmthandle global_share; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ const void *private_runtime_data; 
-+#else -+ __u64 private_runtime_data; -+#endif -+ __u32 private_runtime_data_size; -+ __u32 reserved1; -+ union { -+#ifdef __KERNEL__ -+ struct d3dkmt_createstandardallocation *standard_allocation; -+ const void *priv_drv_data; -+#else -+ __u64 standard_allocation; -+ __u64 priv_drv_data; -+#endif -+ }; -+ __u32 priv_drv_data_size; -+ __u32 alloc_count; -+#ifdef __KERNEL__ -+ struct d3dddi_allocationinfo2 *allocation_info; -+#else -+ __u64 allocation_info; -+#endif -+ struct d3dkmt_createallocationflags flags; -+ __u32 reserved2; -+ __u64 private_runtime_resource_handle; -+}; -+ -+struct d3dddicb_destroyallocation2flags { -+ union { -+ struct { -+ __u32 assume_not_in_use:1; -+ __u32 synchronous_destroy:1; -+ __u32 reserved:29; -+ __u32 system_use_only:1; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_destroyallocation2 { -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+#ifdef __KERNEL__ -+ const struct d3dkmthandle *allocations; -+#else -+ __u64 allocations; -+#endif -+ __u32 alloc_count; -+ struct d3dddicb_destroyallocation2flags flags; -+}; -+ - struct d3dkmt_adaptertype { - union { - struct { -@@ -279,8 +479,12 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x04, struct d3dkmt_createcontextvirtual) - #define LX_DXDESTROYCONTEXT \ - _IOWR(0x47, 0x05, struct d3dkmt_destroycontext) -+#define LX_DXCREATEALLOCATION \ -+ _IOWR(0x47, 0x06, struct d3dkmt_createallocation) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) -+#define LX_DXDESTROYALLOCATION2 \ -+ _IOWR(0x47, 0x13, struct d3dkmt_destroyallocation2) - #define LX_DXENUMADAPTERS2 \ - _IOWR(0x47, 0x14, struct d3dkmt_enumadapters2) - #define LX_DXCLOSEADAPTER \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1677-drivers-hv-dxgkrnl-Creation-of-compute-device-sync-objects.patch b/patch/kernel/archive/wsl2-arm64-6.1/1677-drivers-hv-dxgkrnl-Creation-of-compute-device-sync-objects.patch deleted file mode 100644 index b53d55f96842..000000000000 
--- a/patch/kernel/archive/wsl2-arm64-6.1/1677-drivers-hv-dxgkrnl-Creation-of-compute-device-sync-objects.patch +++ /dev/null @@ -1,1016 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 1 Feb 2022 14:38:32 -0800 -Subject: drivers: hv: dxgkrnl: Creation of compute device sync objects - -Implement ioctls to create and destroy compute devicesync objects: - - the LX_DXCREATESYNCHRONIZATIONOBJECT ioctl, - - the LX_DXDESTROYSYNCHRONIZATIONOBJECT ioctl. - -Compute device synchronization objects are used to synchronize -execution of compute device commands, which are queued to -different execution contexts (dxgcontext objects). - -There are several types of sync objects (mutex, monitored -fence, CPU event, fence). A "signal" or a "wait" operation -could be queued to an execution context. - -Monitored fence sync objects are particular important. -A monitored fence object has a fence value, which could be -monitored by the compute device or by CPU. Therefore, a CPU -virtual address is allocated during object creation to allow -an application to read the fence value. dxg_map_iospace and -dxg_unmap_iospace implement creation of the CPU virtual address. 
-This is done as follow: -- The host allocates a portion of the guest IO space, which is mapped - to the actual fence value memory on the host -- The host returns the guest IO space address to the guest -- The guest allocates a CPU virtual address and updates page tables - to point to the IO space address - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 184 +++++++++ - drivers/hv/dxgkrnl/dxgkrnl.h | 80 ++++ - drivers/hv/dxgkrnl/dxgmodule.c | 1 + - drivers/hv/dxgkrnl/dxgprocess.c | 16 + - drivers/hv/dxgkrnl/dxgvmbus.c | 205 ++++++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 20 + - drivers/hv/dxgkrnl/ioctl.c | 130 +++++- - include/uapi/misc/d3dkmthk.h | 95 +++++ - 8 files changed, 729 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -160,6 +160,24 @@ void dxgadapter_remove_process(struct dxgprocess_adapter *process_info) - list_del(&process_info->adapter_process_list_entry); - } - -+void dxgadapter_add_syncobj(struct dxgadapter *adapter, -+ struct dxgsyncobject *object) -+{ -+ down_write(&adapter->shared_resource_list_lock); -+ list_add_tail(&object->syncobj_list_entry, &adapter->syncobj_list_head); -+ up_write(&adapter->shared_resource_list_lock); -+} -+ -+void dxgadapter_remove_syncobj(struct dxgsyncobject *object) -+{ -+ down_write(&object->adapter->shared_resource_list_lock); -+ if (object->syncobj_list_entry.next) { -+ list_del(&object->syncobj_list_entry); -+ object->syncobj_list_entry.next = NULL; -+ } -+ up_write(&object->adapter->shared_resource_list_lock); -+} -+ - int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter) - { - down_write(&adapter->core_lock); -@@ -213,6 +231,7 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - init_rwsem(&device->context_list_lock); - 
init_rwsem(&device->alloc_list_lock); - INIT_LIST_HEAD(&device->pqueue_list_head); -+ INIT_LIST_HEAD(&device->syncobj_list_head); - device->object_state = DXGOBJECTSTATE_CREATED; - device->execution_state = _D3DKMT_DEVICEEXECUTION_ACTIVE; - -@@ -228,6 +247,7 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - void dxgdevice_stop(struct dxgdevice *device) - { - struct dxgallocation *alloc; -+ struct dxgsyncobject *syncobj; - - DXG_TRACE("Destroying device: %p", device); - dxgdevice_acquire_alloc_list_lock(device); -@@ -235,6 +255,14 @@ void dxgdevice_stop(struct dxgdevice *device) - dxgallocation_stop(alloc); - } - dxgdevice_release_alloc_list_lock(device); -+ -+ hmgrtable_lock(&device->process->handle_table, DXGLOCK_EXCL); -+ list_for_each_entry(syncobj, &device->syncobj_list_head, -+ syncobj_list_entry) { -+ dxgsyncobject_stop(syncobj); -+ } -+ hmgrtable_unlock(&device->process->handle_table, DXGLOCK_EXCL); -+ DXG_TRACE("Device stopped: %p", device); - } - - void dxgdevice_mark_destroyed(struct dxgdevice *device) -@@ -263,6 +291,20 @@ void dxgdevice_destroy(struct dxgdevice *device) - - dxgdevice_acquire_alloc_list_lock(device); - -+ while (!list_empty(&device->syncobj_list_head)) { -+ struct dxgsyncobject *syncobj = -+ list_first_entry(&device->syncobj_list_head, -+ struct dxgsyncobject, -+ syncobj_list_entry); -+ list_del(&syncobj->syncobj_list_entry); -+ syncobj->syncobj_list_entry.next = NULL; -+ dxgdevice_release_alloc_list_lock(device); -+ -+ dxgsyncobject_destroy(process, syncobj); -+ -+ dxgdevice_acquire_alloc_list_lock(device); -+ } -+ - { - struct dxgallocation *alloc; - struct dxgallocation *tmp; -@@ -565,6 +607,30 @@ void dxgdevice_release(struct kref *refcount) - kfree(device); - } - -+void dxgdevice_add_syncobj(struct dxgdevice *device, -+ struct dxgsyncobject *syncobj) -+{ -+ dxgdevice_acquire_alloc_list_lock(device); -+ list_add_tail(&syncobj->syncobj_list_entry, &device->syncobj_list_head); -+ kref_get(&syncobj->syncobj_kref); -+ 
dxgdevice_release_alloc_list_lock(device); -+} -+ -+void dxgdevice_remove_syncobj(struct dxgsyncobject *entry) -+{ -+ struct dxgdevice *device = entry->device; -+ -+ dxgdevice_acquire_alloc_list_lock(device); -+ if (entry->syncobj_list_entry.next) { -+ list_del(&entry->syncobj_list_entry); -+ entry->syncobj_list_entry.next = NULL; -+ kref_put(&entry->syncobj_kref, dxgsyncobject_release); -+ } -+ dxgdevice_release_alloc_list_lock(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ entry->device = NULL; -+} -+ - struct dxgcontext *dxgcontext_create(struct dxgdevice *device) - { - struct dxgcontext *context; -@@ -812,3 +878,121 @@ void dxgprocess_adapter_remove_device(struct dxgdevice *device) - } - mutex_unlock(&device->adapter_info->device_list_mutex); - } -+ -+struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct dxgadapter *adapter, -+ enum -+ d3dddi_synchronizationobject_type -+ type, -+ struct -+ d3dddi_synchronizationobject_flags -+ flags) -+{ -+ struct dxgsyncobject *syncobj; -+ -+ syncobj = kzalloc(sizeof(*syncobj), GFP_KERNEL); -+ if (syncobj == NULL) -+ goto cleanup; -+ syncobj->type = type; -+ syncobj->process = process; -+ switch (type) { -+ case _D3DDDI_MONITORED_FENCE: -+ case _D3DDDI_PERIODIC_MONITORED_FENCE: -+ syncobj->monitored_fence = 1; -+ break; -+ default: -+ break; -+ } -+ if (flags.shared) { -+ syncobj->shared = 1; -+ if (!flags.nt_security_sharing) { -+ DXG_ERR("nt_security_sharing must be set"); -+ goto cleanup; -+ } -+ } -+ -+ kref_init(&syncobj->syncobj_kref); -+ -+ if (syncobj->monitored_fence) { -+ syncobj->device = device; -+ syncobj->device_handle = device->handle; -+ kref_get(&device->device_kref); -+ dxgdevice_add_syncobj(device, syncobj); -+ } else { -+ dxgadapter_add_syncobj(adapter, syncobj); -+ } -+ syncobj->adapter = adapter; -+ kref_get(&adapter->adapter_kref); -+ -+ DXG_TRACE("Syncobj created: %p", syncobj); -+ return syncobj; -+cleanup: -+ if (syncobj) -+ 
kfree(syncobj); -+ return NULL; -+} -+ -+void dxgsyncobject_destroy(struct dxgprocess *process, -+ struct dxgsyncobject *syncobj) -+{ -+ int destroyed; -+ -+ DXG_TRACE("Destroying syncobj: %p", syncobj); -+ -+ dxgsyncobject_stop(syncobj); -+ -+ destroyed = test_and_set_bit(0, &syncobj->flags); -+ if (!destroyed) { -+ DXG_TRACE("Deleting handle: %x", syncobj->handle.v); -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ if (syncobj->handle.v) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ syncobj->handle); -+ syncobj->handle.v = 0; -+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release); -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (syncobj->monitored_fence) -+ dxgdevice_remove_syncobj(syncobj); -+ else -+ dxgadapter_remove_syncobj(syncobj); -+ if (syncobj->adapter) { -+ kref_put(&syncobj->adapter->adapter_kref, -+ dxgadapter_release); -+ syncobj->adapter = NULL; -+ } -+ } -+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release); -+} -+ -+void dxgsyncobject_stop(struct dxgsyncobject *syncobj) -+{ -+ int stopped = test_and_set_bit(1, &syncobj->flags); -+ -+ if (!stopped) { -+ DXG_TRACE("Stopping syncobj"); -+ if (syncobj->monitored_fence) { -+ if (syncobj->mapped_address) { -+ int ret = -+ dxg_unmap_iospace(syncobj->mapped_address, -+ PAGE_SIZE); -+ -+ (void)ret; -+ DXG_TRACE("unmap fence %d %p", -+ ret, syncobj->mapped_address); -+ syncobj->mapped_address = NULL; -+ } -+ } -+ } -+} -+ -+void dxgsyncobject_release(struct kref *refcount) -+{ -+ struct dxgsyncobject *syncobj; -+ -+ syncobj = container_of(refcount, struct dxgsyncobject, syncobj_kref); -+ kfree(syncobj); -+} -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -38,6 +38,7 @@ struct dxgdevice; - struct dxgcontext; - struct dxgallocation; - struct dxgresource; -+struct dxgsyncobject; - - /* 
- * Driver private data. -@@ -100,6 +101,56 @@ int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev); - void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch); - void dxgvmbuschannel_receive(void *ctx); - -+/* -+ * This is GPU synchronization object, which is used to synchronize execution -+ * between GPU contextx/hardware queues or for tracking GPU execution progress. -+ * A dxgsyncobject is created when somebody creates a syncobject or opens a -+ * shared syncobject. -+ * A syncobject belongs to an adapter, unless it is a cross-adapter object. -+ * Cross adapter syncobjects are currently not implemented. -+ * -+ * D3DDDI_MONITORED_FENCE and D3DDDI_PERIODIC_MONITORED_FENCE are called -+ * "device" syncobject, because the belong to a device (dxgdevice). -+ * Device syncobjects are inserted to a list in dxgdevice. -+ * -+ */ -+struct dxgsyncobject { -+ struct kref syncobj_kref; -+ enum d3dddi_synchronizationobject_type type; -+ /* -+ * List entry in dxgdevice for device sync objects. -+ * List entry in dxgadapter for other objects -+ */ -+ struct list_head syncobj_list_entry; -+ /* Adapter, the syncobject belongs to. NULL for stopped sync obejcts. */ -+ struct dxgadapter *adapter; -+ /* -+ * Pointer to the device, which was used to create the object. -+ * This is NULL for non-device syncbjects -+ */ -+ struct dxgdevice *device; -+ struct dxgprocess *process; -+ /* CPU virtual address of the fence value for "device" syncobjects */ -+ void *mapped_address; -+ /* Handle in the process handle table */ -+ struct d3dkmthandle handle; -+ /* Cached handle of the device. Used to avoid device dereference. */ -+ struct d3dkmthandle device_handle; -+ union { -+ struct { -+ /* Must be the first bit */ -+ u32 destroyed:1; -+ /* Must be the second bit */ -+ u32 stopped:1; -+ /* device syncobject */ -+ u32 monitored_fence:1; -+ u32 shared:1; -+ u32 reserved:27; -+ }; -+ long flags; -+ }; -+}; -+ - /* - * The structure defines an offered vGPU vm bus channel. 
- */ -@@ -109,6 +160,20 @@ struct dxgvgpuchannel { - struct hv_device *hdev; - }; - -+struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct dxgadapter *adapter, -+ enum -+ d3dddi_synchronizationobject_type -+ type, -+ struct -+ d3dddi_synchronizationobject_flags -+ flags); -+void dxgsyncobject_destroy(struct dxgprocess *process, -+ struct dxgsyncobject *syncobj); -+void dxgsyncobject_stop(struct dxgsyncobject *syncobj); -+void dxgsyncobject_release(struct kref *refcount); -+ - struct dxgglobal { - struct dxgdriver *drvdata; - struct dxgvmbuschannel channel; -@@ -271,6 +336,8 @@ struct dxgadapter { - struct list_head adapter_list_entry; - /* The list of dxgprocess_adapter entries */ - struct list_head adapter_process_list_head; -+ /* List of all non-device dxgsyncobject objects */ -+ struct list_head syncobj_list_head; - /* This lock protects shared resource and syncobject lists */ - struct rw_semaphore shared_resource_list_lock; - struct pci_dev *pci_dev; -@@ -296,6 +363,9 @@ void dxgadapter_release_lock_shared(struct dxgadapter *adapter); - int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter); - void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter); - void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter); -+void dxgadapter_add_syncobj(struct dxgadapter *adapter, -+ struct dxgsyncobject *so); -+void dxgadapter_remove_syncobj(struct dxgsyncobject *so); - void dxgadapter_add_process(struct dxgadapter *adapter, - struct dxgprocess_adapter *process_info); - void dxgadapter_remove_process(struct dxgprocess_adapter *process_info); -@@ -325,6 +395,7 @@ struct dxgdevice { - struct list_head resource_list_head; - /* List of paging queues. Protected by process handle table lock. 
*/ - struct list_head pqueue_list_head; -+ struct list_head syncobj_list_head; - struct d3dkmthandle handle; - enum d3dkmt_deviceexecution_state execution_state; - u32 handle_valid; -@@ -345,6 +416,8 @@ void dxgdevice_remove_alloc_safe(struct dxgdevice *dev, - struct dxgallocation *a); - void dxgdevice_add_resource(struct dxgdevice *dev, struct dxgresource *res); - void dxgdevice_remove_resource(struct dxgdevice *dev, struct dxgresource *res); -+void dxgdevice_add_syncobj(struct dxgdevice *dev, struct dxgsyncobject *so); -+void dxgdevice_remove_syncobj(struct dxgsyncobject *so); - bool dxgdevice_is_active(struct dxgdevice *dev); - void dxgdevice_acquire_context_list_lock(struct dxgdevice *dev); - void dxgdevice_release_context_list_lock(struct dxgdevice *dev); -@@ -455,6 +528,7 @@ void dxgallocation_free_handle(struct dxgallocation *a); - long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2); - long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2); - -+int dxg_unmap_iospace(void *va, u32 size); - /* - * The convention is that VNBus instance id is a GUID, but the host sets - * the lower part of the value to the host adapter LUID. 
The function -@@ -514,6 +588,12 @@ int dxgvmb_send_create_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - int dxgvmb_send_destroy_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - struct d3dkmt_destroyallocation2 *args, - struct d3dkmthandle *alloc_handles); -+int dxgvmb_send_create_sync_object(struct dxgprocess *pr, -+ struct dxgadapter *adapter, -+ struct d3dkmt_createsynchronizationobject2 -+ *args, struct dxgsyncobject *so); -+int dxgvmb_send_destroy_sync_object(struct dxgprocess *pr, -+ struct d3dkmthandle h); - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -162,6 +162,7 @@ int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, - init_rwsem(&adapter->core_lock); - - INIT_LIST_HEAD(&adapter->adapter_process_list_head); -+ INIT_LIST_HEAD(&adapter->syncobj_list_head); - init_rwsem(&adapter->shared_resource_list_lock); - adapter->pci_dev = dev; - guid_to_luid(guid, &adapter->luid); -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -59,6 +59,7 @@ void dxgprocess_destroy(struct dxgprocess *process) - enum hmgrentry_type t; - struct d3dkmthandle h; - void *o; -+ struct dxgsyncobject *syncobj; - struct dxgprocess_adapter *entry; - struct dxgprocess_adapter *tmp; - -@@ -84,6 +85,21 @@ void dxgprocess_destroy(struct dxgprocess *process) - } - } - -+ i = 0; -+ while (hmgrtable_next_entry(&process->handle_table, &i, &t, &h, &o)) { -+ switch (t) { -+ case HMGRENTRY_TYPE_DXGSYNCOBJECT: -+ DXG_TRACE("Destroy syncobj: %p %d", o, i); -+ syncobj = o; -+ syncobj->handle.v = 0; -+ dxgsyncobject_destroy(process, 
syncobj); -+ break; -+ default: -+ DXG_ERR("invalid entry in handle table %d", t); -+ break; -+ } -+ } -+ - hmgrtable_destroy(&process->handle_table); - hmgrtable_destroy(&process->local_handle_table); - } -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -495,6 +495,88 @@ dxgvmb_send_sync_msg_ntstatus(struct dxgvmbuschannel *channel, - return ret; - } - -+static int check_iospace_address(unsigned long address, u32 size) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (address < dxgglobal->mmiospace_base || -+ size > dxgglobal->mmiospace_size || -+ address >= (dxgglobal->mmiospace_base + -+ dxgglobal->mmiospace_size - size)) { -+ DXG_ERR("invalid iospace address %lx", address); -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+int dxg_unmap_iospace(void *va, u32 size) -+{ -+ int ret = 0; -+ -+ DXG_TRACE("Unmapping io space: %p %x", va, size); -+ -+ /* -+ * When an app calls exit(), dxgkrnl is called to close the device -+ * with current->mm equal to NULL. 
-+ */ -+ if (current->mm) { -+ ret = vm_munmap((unsigned long)va, size); -+ if (ret) { -+ DXG_ERR("vm_munmap failed %d", ret); -+ return -ENOTRECOVERABLE; -+ } -+ } -+ return 0; -+} -+ -+static u8 *dxg_map_iospace(u64 iospace_address, u32 size, -+ unsigned long protection, bool cached) -+{ -+ struct vm_area_struct *vma; -+ unsigned long va; -+ int ret = 0; -+ -+ DXG_TRACE("Mapping io space: %llx %x %lx", -+ iospace_address, size, protection); -+ if (check_iospace_address(iospace_address, size) < 0) { -+ DXG_ERR("invalid address to map"); -+ return NULL; -+ } -+ -+ va = vm_mmap(NULL, 0, size, protection, MAP_SHARED | MAP_ANONYMOUS, 0); -+ if ((long)va <= 0) { -+ DXG_ERR("vm_mmap failed %lx %d", va, size); -+ return NULL; -+ } -+ -+ mmap_read_lock(current->mm); -+ vma = find_vma(current->mm, (unsigned long)va); -+ if (vma) { -+ pgprot_t prot = vma->vm_page_prot; -+ -+ if (!cached) -+ prot = pgprot_writecombine(prot); -+ DXG_TRACE("vma: %lx %lx %lx", -+ vma->vm_start, vma->vm_end, va); -+ vma->vm_pgoff = iospace_address >> PAGE_SHIFT; -+ ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, -+ size, prot); -+ if (ret) -+ DXG_ERR("io_remap_pfn_range failed: %d", ret); -+ } else { -+ DXG_ERR("failed to find vma: %p %lx", vma, va); -+ ret = -ENOMEM; -+ } -+ mmap_read_unlock(current->mm); -+ -+ if (ret) { -+ dxg_unmap_iospace((void *)va, size); -+ return NULL; -+ } -+ DXG_TRACE("Mapped VA: %lx", va); -+ return (u8 *) va; -+} -+ - /* - * Global messages to the host - */ -@@ -613,6 +695,39 @@ int dxgvmb_send_destroy_process(struct d3dkmthandle process) - return ret; - } - -+int dxgvmb_send_destroy_sync_object(struct dxgprocess *process, -+ struct d3dkmthandle sync_object) -+{ -+ struct dxgkvmb_command_destroysyncobject *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, NULL, process, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ 
goto cleanup; -+ -+ command_vm_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_DESTROYSYNCOBJECT, -+ process->host_handle); -+ command->sync_object = sync_object; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(dxgglobal_get_dxgvmbuschannel(), -+ msg.hdr, msg.size); -+ -+ dxgglobal_release_channel_lock(); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - /* - * Virtual GPU messages to the host - */ -@@ -1023,7 +1138,11 @@ int create_existing_sysmem(struct dxgdevice *device, - ret = -ENOMEM; - goto cleanup; - } -+#ifdef _MAIN_KERNEL_ - DXG_TRACE("New gpadl %d", dxgalloc->gpadl.gpadl_handle); -+#else -+ DXG_TRACE("New gpadl %d", dxgalloc->gpadl); -+#endif - - command_vgpu_to_host_init2(&set_store_command->hdr, - DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE, -@@ -1501,6 +1620,92 @@ int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - return ret; - } - -+static void set_result(struct d3dkmt_createsynchronizationobject2 *args, -+ u64 fence_gpu_va, u8 *va) -+{ -+ args->info.periodic_monitored_fence.fence_gpu_virtual_address = -+ fence_gpu_va; -+ args->info.periodic_monitored_fence.fence_cpu_virtual_address = va; -+} -+ -+int -+dxgvmb_send_create_sync_object(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_createsynchronizationobject2 *args, -+ struct dxgsyncobject *syncobj) -+{ -+ struct dxgkvmb_command_createsyncobject_return result = { }; -+ struct dxgkvmb_command_createsyncobject *command; -+ int ret; -+ u8 *va = 0; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CREATESYNCOBJECT, -+ process->host_handle); -+ command->args = *args; -+ command->client_hint = 1; /* CLIENTHINT_UMD */ -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result, -+ sizeof(result)); -+ if (ret < 0) { -+ 
DXG_ERR("failed %d", ret); -+ goto cleanup; -+ } -+ args->sync_object = result.sync_object; -+ if (syncobj->shared) { -+ if (result.global_sync_object.v == 0) { -+ DXG_ERR("shared handle is 0"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ args->info.shared_handle = result.global_sync_object; -+ } -+ -+ if (syncobj->monitored_fence) { -+ va = dxg_map_iospace(result.fence_storage_address, PAGE_SIZE, -+ PROT_READ | PROT_WRITE, true); -+ if (va == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ if (args->info.type == _D3DDDI_MONITORED_FENCE) { -+ args->info.monitored_fence.fence_gpu_virtual_address = -+ result.fence_gpu_va; -+ args->info.monitored_fence.fence_cpu_virtual_address = -+ va; -+ { -+ unsigned long value; -+ -+ DXG_TRACE("fence cpu va: %p", va); -+ ret = copy_from_user(&value, va, -+ sizeof(u64)); -+ if (ret) { -+ DXG_ERR("failed to read fence"); -+ ret = -EINVAL; -+ } else { -+ DXG_TRACE("fence value:%lx", -+ value); -+ } -+ } -+ } else { -+ set_result(args, result.fence_gpu_va, va); -+ } -+ syncobj->mapped_address = va; -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -410,4 +410,24 @@ struct dxgkvmb_command_destroycontext { - struct d3dkmthandle context; - }; - -+struct dxgkvmb_command_createsyncobject { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_createsynchronizationobject2 args; -+ u32 client_hint; -+}; -+ -+struct dxgkvmb_command_createsyncobject_return { -+ struct d3dkmthandle sync_object; -+ struct d3dkmthandle global_sync_object; -+ u64 fence_gpu_va; -+ u64 fence_storage_address; -+ u32 fence_storage_offset; -+}; -+ -+/* The command returns 
ntstatus */ -+struct dxgkvmb_command_destroysyncobject { -+ struct dxgkvmb_command_vm_to_host hdr; -+ struct d3dkmthandle sync_object; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -1341,6 +1341,132 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_createsynchronizationobject2 args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct dxgsyncobject *syncobj = NULL; -+ bool device_lock_acquired = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) -+ goto cleanup; -+ -+ device_lock_acquired = true; -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ syncobj = dxgsyncobject_create(process, device, adapter, args.info.type, -+ args.info.flags); -+ if (syncobj == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_create_sync_object(process, adapter, &args, syncobj); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, syncobj, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ args.sync_object); -+ if (ret >= 0) -+ syncobj->handle = args.sync_object; -+ 
hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (syncobj) { -+ dxgsyncobject_destroy(process, syncobj); -+ if (args.sync_object.v) -+ dxgvmb_send_destroy_sync_object(process, -+ args.sync_object); -+ } -+ } -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_destroy_sync_object(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_destroysynchronizationobject args; -+ struct dxgsyncobject *syncobj = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ DXG_TRACE("handle 0x%x", args.sync_object.v); -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ syncobj = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ args.sync_object); -+ if (syncobj) { -+ DXG_TRACE("syncobj 0x%p", syncobj); -+ syncobj->handle.v = 0; -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ args.sync_object); -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (syncobj == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ dxgsyncobject_destroy(process, syncobj); -+ -+ ret = dxgvmb_send_destroy_sync_object(process, args.sync_object); -+ -+cleanup: -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, -@@ -1358,7 +1484,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x0d */ {}, - /* 0x0e */ {}, - /* 0x0f */ {}, --/* 0x10 */ {}, -+/* 0x10 */ {dxgkio_create_sync_object, LX_DXCREATESYNCHRONIZATIONOBJECT}, - /* 0x11 */ {}, - /* 0x12 
*/ {}, - /* 0x13 */ {dxgkio_destroy_allocation, LX_DXDESTROYALLOCATION2}, -@@ -1371,7 +1497,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x1a */ {}, - /* 0x1b */ {}, - /* 0x1c */ {}, --/* 0x1d */ {}, -+/* 0x1d */ {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT}, - /* 0x1e */ {}, - /* 0x1f */ {}, - /* 0x20 */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -256,6 +256,97 @@ enum d3dkmdt_standardallocationtype { - _D3DKMDT_STANDARDALLOCATION_GDISURFACE = 4, - }; - -+struct d3dddi_synchronizationobject_flags { -+ union { -+ struct { -+ __u32 shared:1; -+ __u32 nt_security_sharing:1; -+ __u32 cross_adapter:1; -+ __u32 top_of_pipeline:1; -+ __u32 no_signal:1; -+ __u32 no_wait:1; -+ __u32 no_signal_max_value_on_tdr:1; -+ __u32 no_gpu_access:1; -+ __u32 reserved:23; -+ }; -+ __u32 value; -+ }; -+}; -+ -+enum d3dddi_synchronizationobject_type { -+ _D3DDDI_SYNCHRONIZATION_MUTEX = 1, -+ _D3DDDI_SEMAPHORE = 2, -+ _D3DDDI_FENCE = 3, -+ _D3DDDI_CPU_NOTIFICATION = 4, -+ _D3DDDI_MONITORED_FENCE = 5, -+ _D3DDDI_PERIODIC_MONITORED_FENCE = 6, -+ _D3DDDI_SYNCHRONIZATION_TYPE_LIMIT -+}; -+ -+struct d3dddi_synchronizationobjectinfo2 { -+ enum d3dddi_synchronizationobject_type type; -+ struct d3dddi_synchronizationobject_flags flags; -+ union { -+ struct { -+ __u32 initial_state; -+ } synchronization_mutex; -+ -+ struct { -+ __u32 max_count; -+ __u32 initial_count; -+ } semaphore; -+ -+ struct { -+ __u64 fence_value; -+ } fence; -+ -+ struct { -+ __u64 event; -+ } cpu_notification; -+ -+ struct { -+ __u64 initial_fence_value; -+#ifdef __KERNEL__ -+ void *fence_cpu_virtual_address; -+#else -+ __u64 *fence_cpu_virtual_address; -+#endif -+ __u64 fence_gpu_virtual_address; -+ __u32 engine_affinity; -+ } monitored_fence; -+ -+ struct { -+ struct d3dkmthandle adapter; -+ __u32 vidpn_target_id; -+ __u64 time; -+#ifdef __KERNEL__ -+ 
void *fence_cpu_virtual_address; -+#else -+ __u64 fence_cpu_virtual_address; -+#endif -+ __u64 fence_gpu_virtual_address; -+ __u32 engine_affinity; -+ } periodic_monitored_fence; -+ -+ struct { -+ __u64 reserved[8]; -+ } reserved; -+ }; -+ struct d3dkmthandle shared_handle; -+}; -+ -+struct d3dkmt_createsynchronizationobject2 { -+ struct d3dkmthandle device; -+ __u32 reserved; -+ struct d3dddi_synchronizationobjectinfo2 info; -+ struct d3dkmthandle sync_object; -+ __u32 reserved1; -+}; -+ -+struct d3dkmt_destroysynchronizationobject { -+ struct d3dkmthandle sync_object; -+}; -+ - enum d3dkmt_standardallocationtype { - _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP = 1, - _D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER = 2, -@@ -483,6 +574,8 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x06, struct d3dkmt_createallocation) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) -+#define LX_DXCREATESYNCHRONIZATIONOBJECT \ -+ _IOWR(0x47, 0x10, struct d3dkmt_createsynchronizationobject2) - #define LX_DXDESTROYALLOCATION2 \ - _IOWR(0x47, 0x13, struct d3dkmt_destroyallocation2) - #define LX_DXENUMADAPTERS2 \ -@@ -491,6 +584,8 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x15, struct d3dkmt_closeadapter) - #define LX_DXDESTROYDEVICE \ - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) -+#define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ -+ _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) - #define LX_DXENUMADAPTERS3 \ - _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) - --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1678-drivers-hv-dxgkrnl-Operations-using-sync-objects.patch b/patch/kernel/archive/wsl2-arm64-6.1/1678-drivers-hv-dxgkrnl-Operations-using-sync-objects.patch deleted file mode 100644 index 4ea7e161dae8..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1678-drivers-hv-dxgkrnl-Operations-using-sync-objects.patch +++ /dev/null @@ -1,1689 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 
2001 -From: Iouri Tarassov -Date: Tue, 1 Feb 2022 13:59:23 -0800 -Subject: drivers: hv: dxgkrnl: Operations using sync objects - -Implement ioctls to submit operations with compute device -sync objects: - - the LX_DXSIGNALSYNCHRONIZATIONOBJECT ioctl. - The ioctl is used to submit a signal to a sync object. - - the LX_DXWAITFORSYNCHRONIZATIONOBJECT ioctl. - The ioctl is used to submit a wait for a sync object - - the LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU ioctl - The ioctl is used to signal to a monitored fence sync object - from a CPU thread. - - the LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU ioctl. - The ioctl is used to submit a signal to a monitored fence - sync object.. - - the LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2 ioctl. - The ioctl is used to submit a signal to a monitored fence - sync object. - - the LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU ioctl. - The ioctl is used to submit a wait for a monitored fence - sync object. - -Compute device synchronization objects are used to synchronize -execution of DMA buffers between different execution contexts. -Operations with sync objects include "signal" and "wait". A wait -for a sync object is satisfied when the sync object is signaled. - -A signal operation could be submitted to a compute device context or -the sync object could be signaled by a CPU thread. - -To improve performance, submitting operations to the host is done -asynchronously when the host supports it. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 38 +- - drivers/hv/dxgkrnl/dxgkrnl.h | 62 + - drivers/hv/dxgkrnl/dxgmodule.c | 102 +- - drivers/hv/dxgkrnl/dxgvmbus.c | 219 ++- - drivers/hv/dxgkrnl/dxgvmbus.h | 48 + - drivers/hv/dxgkrnl/ioctl.c | 702 +++++++++- - drivers/hv/dxgkrnl/misc.h | 2 + - include/uapi/misc/d3dkmthk.h | 159 +++ - 8 files changed, 1311 insertions(+), 21 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -249,7 +249,7 @@ void dxgdevice_stop(struct dxgdevice *device) - struct dxgallocation *alloc; - struct dxgsyncobject *syncobj; - -- DXG_TRACE("Destroying device: %p", device); -+ DXG_TRACE("Stopping device: %p", device); - dxgdevice_acquire_alloc_list_lock(device); - list_for_each_entry(alloc, &device->alloc_list_head, alloc_list_entry) { - dxgallocation_stop(alloc); -@@ -743,15 +743,13 @@ void dxgallocation_destroy(struct dxgallocation *alloc) - } - #ifdef _MAIN_KERNEL_ - if (alloc->gpadl.gpadl_handle) { -- DXG_TRACE("Teardown gpadl %d", -- alloc->gpadl.gpadl_handle); -+ DXG_TRACE("Teardown gpadl %d", alloc->gpadl.gpadl_handle); - vmbus_teardown_gpadl(dxgglobal_get_vmbus(), &alloc->gpadl); - alloc->gpadl.gpadl_handle = 0; - } - else - if (alloc->gpadl) { -- DXG_TRACE("Teardown gpadl %d", -- alloc->gpadl); -+ DXG_TRACE("Teardown gpadl %d", alloc->gpadl); - vmbus_teardown_gpadl(dxgglobal_get_vmbus(), alloc->gpadl); - alloc->gpadl = 0; - } -@@ -901,6 +899,13 @@ struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, - case _D3DDDI_PERIODIC_MONITORED_FENCE: - syncobj->monitored_fence = 1; - break; -+ case _D3DDDI_CPU_NOTIFICATION: -+ syncobj->cpu_event = 1; -+ syncobj->host_event = kzalloc(sizeof(*syncobj->host_event), -+ GFP_KERNEL); -+ if (syncobj->host_event == NULL) -+ goto 
cleanup; -+ break; - default: - break; - } -@@ -928,6 +933,8 @@ struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, - DXG_TRACE("Syncobj created: %p", syncobj); - return syncobj; - cleanup: -+ if (syncobj->host_event) -+ kfree(syncobj->host_event); - if (syncobj) - kfree(syncobj); - return NULL; -@@ -937,6 +944,7 @@ void dxgsyncobject_destroy(struct dxgprocess *process, - struct dxgsyncobject *syncobj) - { - int destroyed; -+ struct dxghosteventcpu *host_event; - - DXG_TRACE("Destroying syncobj: %p", syncobj); - -@@ -955,6 +963,16 @@ void dxgsyncobject_destroy(struct dxgprocess *process, - } - hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); - -+ if (syncobj->cpu_event) { -+ host_event = syncobj->host_event; -+ if (host_event->cpu_event) { -+ eventfd_ctx_put(host_event->cpu_event); -+ if (host_event->hdr.event_id) -+ dxgglobal_remove_host_event( -+ &host_event->hdr); -+ host_event->cpu_event = NULL; -+ } -+ } - if (syncobj->monitored_fence) - dxgdevice_remove_syncobj(syncobj); - else -@@ -971,16 +989,14 @@ void dxgsyncobject_destroy(struct dxgprocess *process, - void dxgsyncobject_stop(struct dxgsyncobject *syncobj) - { - int stopped = test_and_set_bit(1, &syncobj->flags); -+ int ret; - - if (!stopped) { - DXG_TRACE("Stopping syncobj"); - if (syncobj->monitored_fence) { - if (syncobj->mapped_address) { -- int ret = -- dxg_unmap_iospace(syncobj->mapped_address, -- PAGE_SIZE); -- -- (void)ret; -+ ret = dxg_unmap_iospace(syncobj->mapped_address, -+ PAGE_SIZE); - DXG_TRACE("unmap fence %d %p", - ret, syncobj->mapped_address); - syncobj->mapped_address = NULL; -@@ -994,5 +1010,7 @@ void dxgsyncobject_release(struct kref *refcount) - struct dxgsyncobject *syncobj; - - syncobj = container_of(refcount, struct dxgsyncobject, syncobj_kref); -+ if (syncobj->host_event) -+ kfree(syncobj->host_event); - kfree(syncobj); - } -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- 
a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -101,6 +101,29 @@ int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev); - void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch); - void dxgvmbuschannel_receive(void *ctx); - -+/* -+ * The structure describes an event, which will be signaled by -+ * a message from host. -+ */ -+enum dxghosteventtype { -+ dxghostevent_cpu_event = 1, -+}; -+ -+struct dxghostevent { -+ struct list_head host_event_list_entry; -+ u64 event_id; -+ enum dxghosteventtype event_type; -+}; -+ -+struct dxghosteventcpu { -+ struct dxghostevent hdr; -+ struct dxgprocess *process; -+ struct eventfd_ctx *cpu_event; -+ struct completion *completion_event; -+ bool destroy_after_signal; -+ bool remove_from_list; -+}; -+ - /* - * This is GPU synchronization object, which is used to synchronize execution - * between GPU contextx/hardware queues or for tracking GPU execution progress. -@@ -130,6 +153,8 @@ struct dxgsyncobject { - */ - struct dxgdevice *device; - struct dxgprocess *process; -+ /* Used by D3DDDI_CPU_NOTIFICATION objects */ -+ struct dxghosteventcpu *host_event; - /* CPU virtual address of the fence value for "device" syncobjects */ - void *mapped_address; - /* Handle in the process handle table */ -@@ -144,6 +169,7 @@ struct dxgsyncobject { - u32 stopped:1; - /* device syncobject */ - u32 monitored_fence:1; -+ u32 cpu_event:1; - u32 shared:1; - u32 reserved:27; - }; -@@ -206,6 +232,11 @@ struct dxgglobal { - /* protects the dxgprocess_adapter lists */ - struct mutex process_adapter_mutex; - -+ /* list of events, waiting to be signaled by the host */ -+ struct list_head host_event_list_head; -+ spinlock_t host_event_list_mutex; -+ atomic64_t host_event_id; -+ - bool global_channel_initialized; - bool async_msg_enabled; - bool misc_registered; -@@ -228,6 +259,11 @@ struct vmbus_channel *dxgglobal_get_vmbus(void); - struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void); - void 
dxgglobal_acquire_process_adapter_lock(void); - void dxgglobal_release_process_adapter_lock(void); -+void dxgglobal_add_host_event(struct dxghostevent *hostevent); -+void dxgglobal_remove_host_event(struct dxghostevent *hostevent); -+u64 dxgglobal_new_host_event_id(void); -+void dxgglobal_signal_host_event(u64 event_id); -+struct dxghostevent *dxgglobal_get_host_event(u64 event_id); - int dxgglobal_acquire_channel_lock(void); - void dxgglobal_release_channel_lock(void); - -@@ -594,6 +630,31 @@ int dxgvmb_send_create_sync_object(struct dxgprocess *pr, - *args, struct dxgsyncobject *so); - int dxgvmb_send_destroy_sync_object(struct dxgprocess *pr, - struct d3dkmthandle h); -+int dxgvmb_send_signal_sync_object(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dddicb_signalflags flags, -+ u64 legacy_fence_value, -+ struct d3dkmthandle context, -+ u32 object_count, -+ struct d3dkmthandle *object, -+ u32 context_count, -+ struct d3dkmthandle *contexts, -+ u32 fence_count, u64 *fences, -+ struct eventfd_ctx *cpu_event, -+ struct d3dkmthandle device); -+int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle context, -+ u32 object_count, -+ struct d3dkmthandle *objects, -+ u64 *fences, -+ bool legacy_fence); -+int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct -+ d3dkmt_waitforsynchronizationobjectfromcpu -+ *args, -+ u64 cpu_event); - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -@@ -609,6 +670,7 @@ int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel, - void *command, - u32 cmd_size); - -+void signal_host_cpu_event(struct dxghostevent *eventhdr); - int ntstatus2int(struct ntstatus status); - - #ifdef DEBUG -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- 
a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -123,6 +123,102 @@ static struct dxgadapter *find_adapter(struct winluid *luid) - return adapter; - } - -+void dxgglobal_add_host_event(struct dxghostevent *event) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ spin_lock_irq(&dxgglobal->host_event_list_mutex); -+ list_add_tail(&event->host_event_list_entry, -+ &dxgglobal->host_event_list_head); -+ spin_unlock_irq(&dxgglobal->host_event_list_mutex); -+} -+ -+void dxgglobal_remove_host_event(struct dxghostevent *event) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ spin_lock_irq(&dxgglobal->host_event_list_mutex); -+ if (event->host_event_list_entry.next != NULL) { -+ list_del(&event->host_event_list_entry); -+ event->host_event_list_entry.next = NULL; -+ } -+ spin_unlock_irq(&dxgglobal->host_event_list_mutex); -+} -+ -+void signal_host_cpu_event(struct dxghostevent *eventhdr) -+{ -+ struct dxghosteventcpu *event = (struct dxghosteventcpu *)eventhdr; -+ -+ if (event->remove_from_list || -+ event->destroy_after_signal) { -+ list_del(&eventhdr->host_event_list_entry); -+ eventhdr->host_event_list_entry.next = NULL; -+ } -+ if (event->cpu_event) { -+ DXG_TRACE("signal cpu event"); -+ eventfd_signal(event->cpu_event, 1); -+ if (event->destroy_after_signal) -+ eventfd_ctx_put(event->cpu_event); -+ } else { -+ DXG_TRACE("signal completion"); -+ complete(event->completion_event); -+ } -+ if (event->destroy_after_signal) { -+ DXG_TRACE("destroying event %p", event); -+ kfree(event); -+ } -+} -+ -+void dxgglobal_signal_host_event(u64 event_id) -+{ -+ struct dxghostevent *event; -+ unsigned long flags; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ DXG_TRACE("Signaling host event %lld", event_id); -+ -+ spin_lock_irqsave(&dxgglobal->host_event_list_mutex, flags); -+ list_for_each_entry(event, &dxgglobal->host_event_list_head, -+ host_event_list_entry) { -+ if (event->event_id == event_id) { -+ DXG_TRACE("found event to signal"); -+ if 
(event->event_type == dxghostevent_cpu_event) -+ signal_host_cpu_event(event); -+ else -+ DXG_ERR("Unknown host event type"); -+ break; -+ } -+ } -+ spin_unlock_irqrestore(&dxgglobal->host_event_list_mutex, flags); -+} -+ -+struct dxghostevent *dxgglobal_get_host_event(u64 event_id) -+{ -+ struct dxghostevent *entry; -+ struct dxghostevent *event = NULL; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ spin_lock_irq(&dxgglobal->host_event_list_mutex); -+ list_for_each_entry(entry, &dxgglobal->host_event_list_head, -+ host_event_list_entry) { -+ if (entry->event_id == event_id) { -+ list_del(&entry->host_event_list_entry); -+ entry->host_event_list_entry.next = NULL; -+ event = entry; -+ break; -+ } -+ } -+ spin_unlock_irq(&dxgglobal->host_event_list_mutex); -+ return event; -+} -+ -+u64 dxgglobal_new_host_event_id(void) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ return atomic64_inc_return(&dxgglobal->host_event_id); -+} -+ - void dxgglobal_acquire_process_adapter_lock(void) - { - struct dxgglobal *dxgglobal = dxggbl(); -@@ -720,12 +816,16 @@ static struct dxgglobal *dxgglobal_create(void) - INIT_LIST_HEAD(&dxgglobal->vgpu_ch_list_head); - INIT_LIST_HEAD(&dxgglobal->adapter_list_head); - init_rwsem(&dxgglobal->adapter_list_lock); -- - init_rwsem(&dxgglobal->channel_lock); - -+ INIT_LIST_HEAD(&dxgglobal->host_event_list_head); -+ spin_lock_init(&dxgglobal->host_event_list_mutex); -+ atomic64_set(&dxgglobal->host_event_id, 1); -+ - #ifdef DEBUG - dxgk_validate_ioctls(); - #endif -+ - return dxgglobal; - } - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -281,6 +281,22 @@ static void command_vm_to_host_init1(struct dxgkvmb_command_vm_to_host *command, - command->channel_type = DXGKVMB_VM_TO_HOST; - } - -+static void signal_guest_event(struct dxgkvmb_command_host_to_vm *packet, -+ u32 packet_length) -+{ -+ struct 
dxgkvmb_command_signalguestevent *command = (void *)packet; -+ -+ if (packet_length < sizeof(struct dxgkvmb_command_signalguestevent)) { -+ DXG_ERR("invalid signal guest event packet size"); -+ return; -+ } -+ if (command->event == 0) { -+ DXG_ERR("invalid event pointer"); -+ return; -+ } -+ dxgglobal_signal_host_event(command->event); -+} -+ - static void process_inband_packet(struct dxgvmbuschannel *channel, - struct vmpacket_descriptor *desc) - { -@@ -297,6 +313,7 @@ static void process_inband_packet(struct dxgvmbuschannel *channel, - switch (packet->command_type) { - case DXGK_VMBCOMMAND_SIGNALGUESTEVENT: - case DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE: -+ signal_guest_event(packet, packet_length); - break; - case DXGK_VMBCOMMAND_SENDWNFNOTIFICATION: - break; -@@ -959,7 +976,7 @@ dxgvmb_send_create_context(struct dxgadapter *adapter, - command->priv_drv_data, - args->priv_drv_data_size); - if (ret) { -- dev_err(DXGDEV, -+ DXG_ERR( - "Faled to copy private data to user"); - ret = -EINVAL; - dxgvmb_send_destroy_context(adapter, process, -@@ -1706,6 +1723,206 @@ dxgvmb_send_create_sync_object(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_signal_sync_object(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dddicb_signalflags flags, -+ u64 legacy_fence_value, -+ struct d3dkmthandle context, -+ u32 object_count, -+ struct d3dkmthandle __user *objects, -+ u32 context_count, -+ struct d3dkmthandle __user *contexts, -+ u32 fence_count, -+ u64 __user *fences, -+ struct eventfd_ctx *cpu_event_handle, -+ struct d3dkmthandle device) -+{ -+ int ret; -+ struct dxgkvmb_command_signalsyncobject *command; -+ u32 object_size = object_count * sizeof(struct d3dkmthandle); -+ u32 context_size = context_count * sizeof(struct d3dkmthandle); -+ u32 fence_size = fences ? 
fence_count * sizeof(u64) : 0; -+ u8 *current_pos; -+ u32 cmd_size = sizeof(struct dxgkvmb_command_signalsyncobject) + -+ object_size + context_size + fence_size; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (context.v) -+ cmd_size += sizeof(struct d3dkmthandle); -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_SIGNALSYNCOBJECT, -+ process->host_handle); -+ -+ if (flags.enqueue_cpu_event) -+ command->cpu_event_handle = (u64) cpu_event_handle; -+ else -+ command->device = device; -+ command->flags = flags; -+ command->fence_value = legacy_fence_value; -+ command->object_count = object_count; -+ command->context_count = context_count; -+ current_pos = (u8 *) &command[1]; -+ ret = copy_from_user(current_pos, objects, object_size); -+ if (ret) { -+ DXG_ERR("Failed to read objects %p %d", -+ objects, object_size); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ current_pos += object_size; -+ if (context.v) { -+ command->context_count++; -+ *(struct d3dkmthandle *) current_pos = context; -+ current_pos += sizeof(struct d3dkmthandle); -+ } -+ if (context_size) { -+ ret = copy_from_user(current_pos, contexts, context_size); -+ if (ret) { -+ DXG_ERR("Failed to read contexts %p %d", -+ contexts, context_size); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ current_pos += context_size; -+ } -+ if (fence_size) { -+ ret = copy_from_user(current_pos, fences, fence_size); -+ if (ret) { -+ DXG_ERR("Failed to read fences %p %d", -+ fences, fence_size); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ if (dxgglobal->async_msg_enabled) { -+ command->hdr.async_msg = 1; -+ ret = dxgvmb_send_async_msg(msg.channel, msg.hdr, msg.size); -+ } else { -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, -+ msg.size); -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); 
-+ return ret; -+} -+ -+int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct -+ d3dkmt_waitforsynchronizationobjectfromcpu -+ *args, -+ u64 cpu_event) -+{ -+ int ret = -EINVAL; -+ struct dxgkvmb_command_waitforsyncobjectfromcpu *command; -+ u32 object_size = args->object_count * sizeof(struct d3dkmthandle); -+ u32 fence_size = args->object_count * sizeof(u64); -+ u8 *current_pos; -+ u32 cmd_size = sizeof(*command) + object_size + fence_size; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMCPU, -+ process->host_handle); -+ command->device = args->device; -+ command->flags = args->flags; -+ command->object_count = args->object_count; -+ command->guest_event_pointer = (u64) cpu_event; -+ current_pos = (u8 *) &command[1]; -+ -+ ret = copy_from_user(current_pos, args->objects, object_size); -+ if (ret) { -+ DXG_ERR("failed to copy objects"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ current_pos += object_size; -+ ret = copy_from_user(current_pos, args->fence_values, -+ fence_size); -+ if (ret) { -+ DXG_ERR("failed to copy fences"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle context, -+ u32 object_count, -+ struct d3dkmthandle *objects, -+ u64 *fences, -+ bool legacy_fence) -+{ -+ int ret; -+ struct dxgkvmb_command_waitforsyncobjectfromgpu *command; -+ u32 fence_size = object_count * sizeof(u64); -+ u32 object_size = object_count * sizeof(struct d3dkmthandle); -+ u8 *current_pos; -+ u32 cmd_size = object_size + fence_size 
- sizeof(u64) + -+ sizeof(struct dxgkvmb_command_waitforsyncobjectfromgpu); -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (object_count == 0 || object_count > D3DDDI_MAX_OBJECT_WAITED_ON) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMGPU, -+ process->host_handle); -+ command->context = context; -+ command->object_count = object_count; -+ command->legacy_fence_object = legacy_fence; -+ current_pos = (u8 *) command->fence_values; -+ memcpy(current_pos, fences, fence_size); -+ current_pos += fence_size; -+ memcpy(current_pos, objects, object_size); -+ -+ if (dxgglobal->async_msg_enabled) { -+ command->hdr.async_msg = 1; -+ ret = dxgvmb_send_async_msg(msg.channel, msg.hdr, msg.size); -+ } else { -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, -+ msg.size); -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -165,6 +165,13 @@ struct dxgkvmb_command_host_to_vm { - enum dxgkvmb_commandtype_host_to_vm command_type; - }; - -+struct dxgkvmb_command_signalguestevent { -+ struct dxgkvmb_command_host_to_vm hdr; -+ u64 event; -+ u64 process_id; -+ bool dereference_event; -+}; -+ - /* Returns ntstatus */ - struct dxgkvmb_command_setiospaceregion { - struct dxgkvmb_command_vm_to_host hdr; -@@ -430,4 +437,45 @@ struct dxgkvmb_command_destroysyncobject { - struct d3dkmthandle sync_object; - }; - -+/* The command returns ntstatus */ -+struct 
dxgkvmb_command_signalsyncobject { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ u32 object_count; -+ struct d3dddicb_signalflags flags; -+ u32 context_count; -+ u64 fence_value; -+ union { -+ /* Pointer to the guest event object */ -+ u64 cpu_event_handle; -+ /* Non zero when signal from CPU is done */ -+ struct d3dkmthandle device; -+ }; -+ /* struct d3dkmthandle ObjectHandleArray[object_count] */ -+ /* struct d3dkmthandle ContextArray[context_count] */ -+ /* u64 MonitoredFenceValueArray[object_count] */ -+}; -+ -+/* The command returns ntstatus */ -+struct dxgkvmb_command_waitforsyncobjectfromcpu { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ u32 object_count; -+ struct d3dddi_waitforsynchronizationobjectfromcpu_flags flags; -+ u64 guest_event_pointer; -+ bool dereference_event; -+ /* struct d3dkmthandle ObjectHandleArray[object_count] */ -+ /* u64 FenceValueArray [object_count] */ -+}; -+ -+/* The command returns ntstatus */ -+struct dxgkvmb_command_waitforsyncobjectfromgpu { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle context; -+ /* Must be 1 when bLegacyFenceObject is TRUE */ -+ u32 object_count; -+ bool legacy_fence_object; -+ u64 fence_values[1]; -+ /* struct d3dkmthandle ObjectHandles[object_count] */ -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -759,7 +759,7 @@ get_standard_alloc_priv_data(struct dxgdevice *device, - res_priv_data = vzalloc(res_priv_data_size); - if (res_priv_data == NULL) { - ret = -ENOMEM; -- dev_err(DXGDEV, -+ DXG_ERR( - "failed to alloc memory for res priv data: %d", - res_priv_data_size); - goto cleanup; -@@ -1065,7 +1065,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - alloc_info[i].priv_drv_data, - priv_data_size); - if (ret) { -- dev_err(DXGDEV, -+ DXG_ERR( - "failed to 
copy priv data"); - ret = -EFAULT; - goto cleanup; -@@ -1348,8 +1348,10 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - struct d3dkmt_createsynchronizationobject2 args; - struct dxgdevice *device = NULL; - struct dxgadapter *adapter = NULL; -+ struct eventfd_ctx *event = NULL; - struct dxgsyncobject *syncobj = NULL; - bool device_lock_acquired = false; -+ struct dxghosteventcpu *host_event = NULL; - - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { -@@ -1384,6 +1386,27 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - goto cleanup; - } - -+ if (args.info.type == _D3DDDI_CPU_NOTIFICATION) { -+ event = eventfd_ctx_fdget((int) -+ args.info.cpu_notification.event); -+ if (IS_ERR(event)) { -+ DXG_ERR("failed to reference the event"); -+ event = NULL; -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ host_event = syncobj->host_event; -+ host_event->hdr.event_id = dxgglobal_new_host_event_id(); -+ host_event->cpu_event = event; -+ host_event->remove_from_list = false; -+ host_event->destroy_after_signal = false; -+ host_event->hdr.event_type = dxghostevent_cpu_event; -+ dxgglobal_add_host_event(&host_event->hdr); -+ args.info.cpu_notification.event = host_event->hdr.event_id; -+ DXG_TRACE("creating CPU notification event: %lld", -+ args.info.cpu_notification.event); -+ } -+ - ret = dxgvmb_send_create_sync_object(process, adapter, &args, syncobj); - if (ret < 0) - goto cleanup; -@@ -1411,7 +1434,10 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - if (args.sync_object.v) - dxgvmb_send_destroy_sync_object(process, - args.sync_object); -+ event = NULL; - } -+ if (event) -+ eventfd_ctx_put(event); - } - if (adapter) - dxgadapter_release_lock_shared(adapter); -@@ -1467,6 +1493,659 @@ dxgkio_destroy_sync_object(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_signal_sync_object(struct dxgprocess *process, void *__user inargs) -+{ -+ 
struct d3dkmt_signalsynchronizationobject2 args; -+ struct d3dkmt_signalsynchronizationobject2 *__user in_args = inargs; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int ret; -+ u32 fence_count = 1; -+ struct eventfd_ctx *event = NULL; -+ struct dxghosteventcpu *host_event = NULL; -+ bool host_event_added = false; -+ u64 host_event_id = 0; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.context_count >= D3DDDI_MAX_BROADCAST_CONTEXT || -+ args.object_count > D3DDDI_MAX_OBJECT_SIGNALED) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.flags.enqueue_cpu_event) { -+ host_event = kzalloc(sizeof(*host_event), GFP_KERNEL); -+ if (host_event == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ host_event->process = process; -+ event = eventfd_ctx_fdget((int)args.cpu_event_handle); -+ if (IS_ERR(event)) { -+ DXG_ERR("failed to reference the event"); -+ event = NULL; -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ fence_count = 0; -+ host_event->cpu_event = event; -+ host_event_id = dxgglobal_new_host_event_id(); -+ host_event->hdr.event_type = dxghostevent_cpu_event; -+ host_event->hdr.event_id = host_event_id; -+ host_event->remove_from_list = true; -+ host_event->destroy_after_signal = true; -+ dxgglobal_add_host_event(&host_event->hdr); -+ host_event_added = true; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_signal_sync_object(process, adapter, -+ args.flags, args.fence.fence_value, -+ args.context, args.object_count, -+ in_args->object_array, -+ args.context_count, -+ in_args->contexts, fence_count, -+ NULL, (void 
*)host_event_id, -+ zerohandle); -+ -+ /* -+ * When the send operation succeeds, the host event will be destroyed -+ * after signal from the host -+ */ -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (host_event_added) { -+ /* The event might be signaled and destroyed by host */ -+ host_event = (struct dxghosteventcpu *) -+ dxgglobal_get_host_event(host_event_id); -+ if (host_event) { -+ eventfd_ctx_put(event); -+ event = NULL; -+ kfree(host_event); -+ host_event = NULL; -+ } -+ } -+ if (event) -+ eventfd_ctx_put(event); -+ if (host_event) -+ kfree(host_event); -+ } -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_signal_sync_object_cpu(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_signalsynchronizationobjectfromcpu args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (args.object_count == 0 || -+ args.object_count > D3DDDI_MAX_OBJECT_SIGNALED) { -+ DXG_TRACE("Too many syncobjects : %d", args.object_count); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_signal_sync_object(process, adapter, -+ args.flags, 0, zerohandle, -+ args.object_count, args.objects, 0, -+ NULL, args.object_count, -+ args.fence_values, NULL, -+ args.device); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s 
%d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_signal_sync_object_gpu(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_signalsynchronizationobjectfromgpu args; -+ struct d3dkmt_signalsynchronizationobjectfromgpu *__user user_args = -+ inargs; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dddicb_signalflags flags = { }; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.object_count == 0 || -+ args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_signal_sync_object(process, adapter, -+ flags, 0, zerohandle, -+ args.object_count, -+ args.objects, 1, -+ &user_args->context, -+ args.object_count, -+ args.monitored_fence_values, NULL, -+ zerohandle); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_signal_sync_object_gpu2(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_signalsynchronizationobjectfromgpu2 args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dkmthandle context_handle; -+ struct eventfd_ctx *event = NULL; -+ u64 *fences = NULL; -+ u32 fence_count = 0; -+ int ret; -+ struct dxghosteventcpu *host_event = NULL; -+ bool host_event_added = false; -+ u64 host_event_id = 0; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if 
(ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.flags.enqueue_cpu_event) { -+ if (args.object_count != 0 || args.cpu_event_handle == 0) { -+ DXG_ERR("Bad input in EnqueueCpuEvent: %d %lld", -+ args.object_count, args.cpu_event_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } else if (args.object_count == 0 || -+ args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE || -+ args.context_count == 0 || -+ args.context_count > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("Invalid input: %d %d", -+ args.object_count, args.context_count); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = copy_from_user(&context_handle, args.contexts, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy context handle"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.flags.enqueue_cpu_event) { -+ host_event = kzalloc(sizeof(*host_event), GFP_KERNEL); -+ if (host_event == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ host_event->process = process; -+ event = eventfd_ctx_fdget((int)args.cpu_event_handle); -+ if (IS_ERR(event)) { -+ DXG_ERR("failed to reference the event"); -+ event = NULL; -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ fence_count = 0; -+ host_event->cpu_event = event; -+ host_event_id = dxgglobal_new_host_event_id(); -+ host_event->hdr.event_id = host_event_id; -+ host_event->hdr.event_type = dxghostevent_cpu_event; -+ host_event->remove_from_list = true; -+ host_event->destroy_after_signal = true; -+ dxgglobal_add_host_event(&host_event->hdr); -+ host_event_added = true; -+ } else { -+ fences = args.monitored_fence_values; -+ fence_count = args.object_count; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ context_handle); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = 
dxgvmb_send_signal_sync_object(process, adapter, -+ args.flags, 0, zerohandle, -+ args.object_count, args.objects, -+ args.context_count, args.contexts, -+ fence_count, fences, -+ (void *)host_event_id, zerohandle); -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (host_event_added) { -+ /* The event might be signaled and destroyed by host */ -+ host_event = (struct dxghosteventcpu *) -+ dxgglobal_get_host_event(host_event_id); -+ if (host_event) { -+ eventfd_ctx_put(event); -+ event = NULL; -+ kfree(host_event); -+ host_event = NULL; -+ } -+ } -+ if (event) -+ eventfd_ctx_put(event); -+ if (host_event) -+ kfree(host_event); -+ } -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_wait_sync_object(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_waitforsynchronizationobject2 args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.object_count > D3DDDI_MAX_OBJECT_WAITED_ON || -+ args.object_count == 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ DXG_TRACE("Fence value: %lld", args.fence.fence_value); -+ ret = dxgvmb_send_wait_sync_object_gpu(process, adapter, -+ args.context, args.object_count, -+ args.object_array, -+ &args.fence.fence_value, true); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ 
kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_wait_sync_object_cpu(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_waitforsynchronizationobjectfromcpu args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct eventfd_ctx *event = NULL; -+ struct dxghosteventcpu host_event = { }; -+ struct dxghosteventcpu *async_host_event = NULL; -+ struct completion local_event = { }; -+ u64 event_id = 0; -+ int ret; -+ bool host_event_added = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE || -+ args.object_count == 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.async_event) { -+ async_host_event = kzalloc(sizeof(*async_host_event), -+ GFP_KERNEL); -+ if (async_host_event == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ async_host_event->process = process; -+ event = eventfd_ctx_fdget((int)args.async_event); -+ if (IS_ERR(event)) { -+ DXG_ERR("failed to reference the event"); -+ event = NULL; -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ async_host_event->cpu_event = event; -+ async_host_event->hdr.event_id = dxgglobal_new_host_event_id(); -+ async_host_event->destroy_after_signal = true; -+ async_host_event->hdr.event_type = dxghostevent_cpu_event; -+ dxgglobal_add_host_event(&async_host_event->hdr); -+ event_id = async_host_event->hdr.event_id; -+ host_event_added = true; -+ } else { -+ init_completion(&local_event); -+ host_event.completion_event = &local_event; -+ host_event.hdr.event_id = dxgglobal_new_host_event_id(); -+ host_event.hdr.event_type = dxghostevent_cpu_event; -+ dxgglobal_add_host_event(&host_event.hdr); -+ event_id = host_event.hdr.event_id; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == 
NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_wait_sync_object_cpu(process, adapter, -+ &args, event_id); -+ if (ret < 0) -+ goto cleanup; -+ -+ if (args.async_event == 0) { -+ dxgadapter_release_lock_shared(adapter); -+ adapter = NULL; -+ ret = wait_for_completion_interruptible(&local_event); -+ if (ret) { -+ DXG_ERR("wait_completion_interruptible: %d", -+ ret); -+ ret = -ERESTARTSYS; -+ } -+ } -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ if (host_event.hdr.event_id) -+ dxgglobal_remove_host_event(&host_event.hdr); -+ if (ret < 0) { -+ if (host_event_added) { -+ async_host_event = (struct dxghosteventcpu *) -+ dxgglobal_get_host_event(event_id); -+ if (async_host_event) { -+ if (async_host_event->hdr.event_type == -+ dxghostevent_cpu_event) { -+ eventfd_ctx_put(event); -+ event = NULL; -+ kfree(async_host_event); -+ async_host_event = NULL; -+ } else { -+ DXG_ERR("Invalid event type"); -+ DXGKRNL_ASSERT(0); -+ } -+ } -+ } -+ if (event) -+ eventfd_ctx_put(event); -+ if (async_host_event) -+ kfree(async_host_event); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_waitforsynchronizationobjectfromgpu args; -+ struct dxgcontext *context = NULL; -+ struct d3dkmthandle device_handle = {}; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct dxgsyncobject *syncobj = NULL; -+ struct d3dkmthandle *objects = NULL; -+ u32 object_size; -+ u64 *fences = NULL; -+ int ret; -+ enum hmgrentry_type syncobj_type = HMGRENTRY_TYPE_FREE; -+ bool monitored_fence = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ 
DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE || -+ args.object_count == 0) { -+ DXG_ERR("Invalid object count: %d", args.object_count); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ object_size = sizeof(struct d3dkmthandle) * args.object_count; -+ objects = vzalloc(object_size); -+ if (objects == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(objects, args.objects, object_size); -+ if (ret) { -+ DXG_ERR("failed to copy objects"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -+ context = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (context) { -+ device_handle = context->device_handle; -+ syncobj_type = -+ hmgrtable_get_object_type(&process->handle_table, -+ objects[0]); -+ } -+ if (device_handle.v == 0) { -+ DXG_ERR("Invalid context handle: %x", args.context.v); -+ ret = -EINVAL; -+ } else { -+ if (syncobj_type == HMGRENTRY_TYPE_MONITOREDFENCE) { -+ monitored_fence = true; -+ } else if (syncobj_type == HMGRENTRY_TYPE_DXGSYNCOBJECT) { -+ syncobj = -+ hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ objects[0]); -+ if (syncobj == NULL) { -+ DXG_ERR("Invalid syncobj: %x", -+ objects[0].v); -+ ret = -EINVAL; -+ } else { -+ monitored_fence = syncobj->monitored_fence; -+ } -+ } else { -+ DXG_ERR("Invalid syncobj type: %x", -+ objects[0].v); -+ ret = -EINVAL; -+ } -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED); -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ if (monitored_fence) { -+ object_size = sizeof(u64) * args.object_count; -+ fences = vzalloc(object_size); -+ if (fences == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(fences, args.monitored_fence_values, -+ object_size); -+ if (ret) { -+ DXG_ERR("failed to copy fences"); -+ ret = -EINVAL; -+ goto cleanup; -+ 
} -+ } else { -+ fences = &args.fence_value; -+ } -+ -+ device = dxgprocess_device_by_handle(process, device_handle); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_wait_sync_object_gpu(process, adapter, -+ args.context, args.object_count, -+ objects, fences, -+ !monitored_fence); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ if (objects) -+ vfree(objects); -+ if (fences && fences != &args.fence_value) -+ vfree(fences); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, -@@ -1485,8 +2164,8 @@ static struct ioctl_desc ioctls[] = { - /* 0x0e */ {}, - /* 0x0f */ {}, - /* 0x10 */ {dxgkio_create_sync_object, LX_DXCREATESYNCHRONIZATIONOBJECT}, --/* 0x11 */ {}, --/* 0x12 */ {}, -+/* 0x11 */ {dxgkio_signal_sync_object, LX_DXSIGNALSYNCHRONIZATIONOBJECT}, -+/* 0x12 */ {dxgkio_wait_sync_object, LX_DXWAITFORSYNCHRONIZATIONOBJECT}, - /* 0x13 */ {dxgkio_destroy_allocation, LX_DXDESTROYALLOCATION2}, - /* 0x14 */ {dxgkio_enum_adapters, LX_DXENUMADAPTERS2}, - /* 0x15 */ {dxgkio_close_adapter, LX_DXCLOSEADAPTER}, -@@ -1517,17 +2196,22 @@ static struct ioctl_desc ioctls[] = { - /* 0x2e */ {}, - /* 0x2f */ {}, - /* 0x30 */ {}, --/* 0x31 */ {}, --/* 0x32 */ {}, --/* 0x33 */ {}, -+/* 0x31 */ {dxgkio_signal_sync_object_cpu, -+ LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU}, -+/* 0x32 */ {dxgkio_signal_sync_object_gpu, -+ LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU}, -+/* 0x33 */ {dxgkio_signal_sync_object_gpu2, -+ LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2}, - /* 0x34 */ {}, - /* 0x35 */ {}, - /* 0x36 */ {}, - /* 0x37 */ {}, - /* 0x38 */ {}, - /* 0x39 */ {}, --/* 0x3a 
*/ {}, --/* 0x3b */ {}, -+/* 0x3a */ {dxgkio_wait_sync_object_cpu, -+ LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU}, -+/* 0x3b */ {dxgkio_wait_sync_object_gpu, -+ LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU}, - /* 0x3c */ {}, - /* 0x3d */ {}, - /* 0x3e */ {dxgkio_enum_adapters3, LX_DXENUMADAPTERS3}, -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -25,6 +25,8 @@ extern const struct d3dkmthandle zerohandle; - * The locks here are in the order from lowest to highest. - * When a lower lock is held, the higher lock should not be acquired. - * -+ * device_list_mutex -+ * host_event_list_mutex - * channel_lock (VMBus channel lock) - * fd_mutex - * plistmutex (process list mutex) -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -60,6 +60,9 @@ struct winluid { - - #define D3DKMT_CREATEALLOCATION_MAX 1024 - #define D3DKMT_ADAPTERS_MAX 64 -+#define D3DDDI_MAX_BROADCAST_CONTEXT 64 -+#define D3DDDI_MAX_OBJECT_WAITED_ON 32 -+#define D3DDDI_MAX_OBJECT_SIGNALED 32 - - struct d3dkmt_adapterinfo { - struct d3dkmthandle adapter_handle; -@@ -343,6 +346,148 @@ struct d3dkmt_createsynchronizationobject2 { - __u32 reserved1; - }; - -+struct d3dkmt_waitforsynchronizationobject2 { -+ struct d3dkmthandle context; -+ __u32 object_count; -+ struct d3dkmthandle object_array[D3DDDI_MAX_OBJECT_WAITED_ON]; -+ union { -+ struct { -+ __u64 fence_value; -+ } fence; -+ __u64 reserved[8]; -+ }; -+}; -+ -+struct d3dddicb_signalflags { -+ union { -+ struct { -+ __u32 signal_at_submission:1; -+ __u32 enqueue_cpu_event:1; -+ __u32 allow_fence_rewind:1; -+ __u32 reserved:28; -+ __u32 DXGK_SIGNAL_FLAG_INTERNAL0:1; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_signalsynchronizationobject2 { -+ struct d3dkmthandle context; -+ __u32 
object_count; -+ struct d3dkmthandle object_array[D3DDDI_MAX_OBJECT_SIGNALED]; -+ struct d3dddicb_signalflags flags; -+ __u32 context_count; -+ struct d3dkmthandle contexts[D3DDDI_MAX_BROADCAST_CONTEXT]; -+ union { -+ struct { -+ __u64 fence_value; -+ } fence; -+ __u64 cpu_event_handle; -+ __u64 reserved[8]; -+ }; -+}; -+ -+struct d3dddi_waitforsynchronizationobjectfromcpu_flags { -+ union { -+ struct { -+ __u32 wait_any:1; -+ __u32 reserved:31; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_waitforsynchronizationobjectfromcpu { -+ struct d3dkmthandle device; -+ __u32 object_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+ __u64 *fence_values; -+#else -+ __u64 objects; -+ __u64 fence_values; -+#endif -+ __u64 async_event; -+ struct d3dddi_waitforsynchronizationobjectfromcpu_flags flags; -+}; -+ -+struct d3dkmt_signalsynchronizationobjectfromcpu { -+ struct d3dkmthandle device; -+ __u32 object_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+ __u64 *fence_values; -+#else -+ __u64 objects; -+ __u64 fence_values; -+#endif -+ struct d3dddicb_signalflags flags; -+}; -+ -+struct d3dkmt_waitforsynchronizationobjectfromgpu { -+ struct d3dkmthandle context; -+ __u32 object_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+#else -+ __u64 objects; -+#endif -+ union { -+#ifdef __KERNEL__ -+ __u64 *monitored_fence_values; -+#else -+ __u64 monitored_fence_values; -+#endif -+ __u64 fence_value; -+ __u64 reserved[8]; -+ }; -+}; -+ -+struct d3dkmt_signalsynchronizationobjectfromgpu { -+ struct d3dkmthandle context; -+ __u32 object_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+#else -+ __u64 objects; -+#endif -+ union { -+#ifdef __KERNEL__ -+ __u64 *monitored_fence_values; -+#else -+ __u64 monitored_fence_values; -+#endif -+ __u64 reserved[8]; -+ }; -+}; -+ -+struct d3dkmt_signalsynchronizationobjectfromgpu2 { -+ __u32 object_count; -+ __u32 reserved1; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+#else -+ __u64 
objects; -+#endif -+ struct d3dddicb_signalflags flags; -+ __u32 context_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *contexts; -+#else -+ __u64 contexts; -+#endif -+ union { -+ __u64 fence_value; -+ __u64 cpu_event_handle; -+#ifdef __KERNEL__ -+ __u64 *monitored_fence_values; -+#else -+ __u64 monitored_fence_values; -+#endif -+ __u64 reserved[8]; -+ }; -+}; -+ - struct d3dkmt_destroysynchronizationobject { - struct d3dkmthandle sync_object; - }; -@@ -576,6 +721,10 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) - #define LX_DXCREATESYNCHRONIZATIONOBJECT \ - _IOWR(0x47, 0x10, struct d3dkmt_createsynchronizationobject2) -+#define LX_DXSIGNALSYNCHRONIZATIONOBJECT \ -+ _IOWR(0x47, 0x11, struct d3dkmt_signalsynchronizationobject2) -+#define LX_DXWAITFORSYNCHRONIZATIONOBJECT \ -+ _IOWR(0x47, 0x12, struct d3dkmt_waitforsynchronizationobject2) - #define LX_DXDESTROYALLOCATION2 \ - _IOWR(0x47, 0x13, struct d3dkmt_destroyallocation2) - #define LX_DXENUMADAPTERS2 \ -@@ -586,6 +735,16 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ - _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) -+#define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU \ -+ _IOWR(0x47, 0x31, struct d3dkmt_signalsynchronizationobjectfromcpu) -+#define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU \ -+ _IOWR(0x47, 0x32, struct d3dkmt_signalsynchronizationobjectfromgpu) -+#define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2 \ -+ _IOWR(0x47, 0x33, struct d3dkmt_signalsynchronizationobjectfromgpu2) -+#define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \ -+ _IOWR(0x47, 0x3a, struct d3dkmt_waitforsynchronizationobjectfromcpu) -+#define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \ -+ _IOWR(0x47, 0x3b, struct d3dkmt_waitforsynchronizationobjectfromgpu) - #define LX_DXENUMADAPTERS3 \ - _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) - --- -Armbian - diff --git 
a/patch/kernel/archive/wsl2-arm64-6.1/1679-drivers-hv-dxgkrnl-Sharing-of-dxgresource-objects.patch b/patch/kernel/archive/wsl2-arm64-6.1/1679-drivers-hv-dxgkrnl-Sharing-of-dxgresource-objects.patch deleted file mode 100644 index e777f4880d2e..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1679-drivers-hv-dxgkrnl-Sharing-of-dxgresource-objects.patch +++ /dev/null @@ -1,1464 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Mon, 31 Jan 2022 17:52:31 -0800 -Subject: drivers: hv: dxgkrnl: Sharing of dxgresource objects - -Implement creation of shared resources and ioctls for sharing -dxgresource objects between processes in the virtual machine. - -A dxgresource object is a collection of dxgallocation objects. -The driver API allows addition/removal of allocations to a resource, -but has limitations on addition/removal of allocations to a shared -resource. When a resource is "sealed", addition/removal of allocations -is not allowed. - -Resources are shared using file descriptor (FD) handles. The name -"NT handle" is used to be compatible with Windows implementation. - -An FD handle is created by the LX_DXSHAREOBJECTS ioctl. The given FD -handle could be sent to another process using any Linux API. - -To use a shared resource object in other ioctls the object needs to be -opened using its FD handle. An resource object is opened by the -LX_DXOPENRESOURCEFROMNTHANDLE ioctl. This ioctl returns a d3dkmthandle -value, which can be used to reference the resource object. - -The LX_DXQUERYRESOURCEINFOFROMNTHANDLE ioctl is used to query private -driver data of a shared resource object. This private data needs to be -used to actually open the object using the LX_DXOPENRESOURCEFROMNTHANDLE -ioctl. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 81 + - drivers/hv/dxgkrnl/dxgkrnl.h | 77 + - drivers/hv/dxgkrnl/dxgmodule.c | 1 + - drivers/hv/dxgkrnl/dxgvmbus.c | 127 ++ - drivers/hv/dxgkrnl/dxgvmbus.h | 30 + - drivers/hv/dxgkrnl/ioctl.c | 792 +++++++++- - include/uapi/misc/d3dkmthk.h | 96 ++ - 7 files changed, 1200 insertions(+), 4 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -160,6 +160,17 @@ void dxgadapter_remove_process(struct dxgprocess_adapter *process_info) - list_del(&process_info->adapter_process_list_entry); - } - -+void dxgadapter_remove_shared_resource(struct dxgadapter *adapter, -+ struct dxgsharedresource *object) -+{ -+ down_write(&adapter->shared_resource_list_lock); -+ if (object->shared_resource_list_entry.next) { -+ list_del(&object->shared_resource_list_entry); -+ object->shared_resource_list_entry.next = NULL; -+ } -+ up_write(&adapter->shared_resource_list_lock); -+} -+ - void dxgadapter_add_syncobj(struct dxgadapter *adapter, - struct dxgsyncobject *object) - { -@@ -489,6 +500,69 @@ void dxgdevice_remove_resource(struct dxgdevice *device, - } - } - -+struct dxgsharedresource *dxgsharedresource_create(struct dxgadapter *adapter) -+{ -+ struct dxgsharedresource *resource; -+ -+ resource = kzalloc(sizeof(*resource), GFP_KERNEL); -+ if (resource) { -+ INIT_LIST_HEAD(&resource->resource_list_head); -+ kref_init(&resource->sresource_kref); -+ mutex_init(&resource->fd_mutex); -+ resource->adapter = adapter; -+ } -+ return resource; -+} -+ -+void dxgsharedresource_destroy(struct kref *refcount) -+{ -+ struct dxgsharedresource *resource; -+ -+ resource = container_of(refcount, struct dxgsharedresource, -+ sresource_kref); -+ if (resource->runtime_private_data) -+ 
vfree(resource->runtime_private_data); -+ if (resource->resource_private_data) -+ vfree(resource->resource_private_data); -+ if (resource->alloc_private_data_sizes) -+ vfree(resource->alloc_private_data_sizes); -+ if (resource->alloc_private_data) -+ vfree(resource->alloc_private_data); -+ kfree(resource); -+} -+ -+void dxgsharedresource_add_resource(struct dxgsharedresource *shared_resource, -+ struct dxgresource *resource) -+{ -+ down_write(&shared_resource->adapter->shared_resource_list_lock); -+ DXG_TRACE("Adding resource: %p %p", shared_resource, resource); -+ list_add_tail(&resource->shared_resource_list_entry, -+ &shared_resource->resource_list_head); -+ kref_get(&shared_resource->sresource_kref); -+ kref_get(&resource->resource_kref); -+ resource->shared_owner = shared_resource; -+ up_write(&shared_resource->adapter->shared_resource_list_lock); -+} -+ -+void dxgsharedresource_remove_resource(struct dxgsharedresource -+ *shared_resource, -+ struct dxgresource *resource) -+{ -+ struct dxgadapter *adapter = shared_resource->adapter; -+ -+ down_write(&adapter->shared_resource_list_lock); -+ DXG_TRACE("Removing resource: %p %p", shared_resource, resource); -+ if (resource->shared_resource_list_entry.next) { -+ list_del(&resource->shared_resource_list_entry); -+ resource->shared_resource_list_entry.next = NULL; -+ kref_put(&shared_resource->sresource_kref, -+ dxgsharedresource_destroy); -+ resource->shared_owner = NULL; -+ kref_put(&resource->resource_kref, dxgresource_release); -+ } -+ up_write(&adapter->shared_resource_list_lock); -+} -+ - struct dxgresource *dxgresource_create(struct dxgdevice *device) - { - struct dxgresource *resource; -@@ -532,6 +606,7 @@ void dxgresource_destroy(struct dxgresource *resource) - struct d3dkmt_destroyallocation2 args = { }; - int destroyed = test_and_set_bit(0, &resource->flags); - struct dxgdevice *device = resource->device; -+ struct dxgsharedresource *shared_resource; - - if (!destroyed) { - 
dxgresource_free_handle(resource); -@@ -547,6 +622,12 @@ void dxgresource_destroy(struct dxgresource *resource) - dxgallocation_destroy(alloc); - } - dxgdevice_remove_resource(device, resource); -+ shared_resource = resource->shared_owner; -+ if (shared_resource) { -+ dxgsharedresource_remove_resource(shared_resource, -+ resource); -+ resource->shared_owner = NULL; -+ } - } - kref_put(&resource->resource_kref, dxgresource_release); - } -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -38,6 +38,7 @@ struct dxgdevice; - struct dxgcontext; - struct dxgallocation; - struct dxgresource; -+struct dxgsharedresource; - struct dxgsyncobject; - - /* -@@ -372,6 +373,8 @@ struct dxgadapter { - struct list_head adapter_list_entry; - /* The list of dxgprocess_adapter entries */ - struct list_head adapter_process_list_head; -+ /* List of all dxgsharedresource objects */ -+ struct list_head shared_resource_list_head; - /* List of all non-device dxgsyncobject objects */ - struct list_head syncobj_list_head; - /* This lock protects shared resource and syncobject lists */ -@@ -405,6 +408,8 @@ void dxgadapter_remove_syncobj(struct dxgsyncobject *so); - void dxgadapter_add_process(struct dxgadapter *adapter, - struct dxgprocess_adapter *process_info); - void dxgadapter_remove_process(struct dxgprocess_adapter *process_info); -+void dxgadapter_remove_shared_resource(struct dxgadapter *adapter, -+ struct dxgsharedresource *object); - - /* - * The object represent the device object. -@@ -484,6 +489,64 @@ void dxgcontext_destroy_safe(struct dxgprocess *pr, struct dxgcontext *ctx); - void dxgcontext_release(struct kref *refcount); - bool dxgcontext_is_active(struct dxgcontext *ctx); - -+/* -+ * A shared resource object is created to track the list of dxgresource objects, -+ * which are opened for the same underlying shared resource. 
-+ * Objects are shared by using a file descriptor handle. -+ * FD is created by calling dxgk_share_objects and providing shandle to -+ * dxgsharedresource. The FD points to a dxgresource object, which is created -+ * by calling dxgk_open_resource_nt. dxgresource object is referenced by the -+ * FD. -+ * -+ * The object is referenced by every dxgresource in its list. -+ * -+ */ -+struct dxgsharedresource { -+ /* Every dxgresource object in the resource list takes a reference */ -+ struct kref sresource_kref; -+ struct dxgadapter *adapter; -+ /* List of dxgresource objects, opened for the shared resource. */ -+ /* Protected by dxgadapter::shared_resource_list_lock */ -+ struct list_head resource_list_head; -+ /* Entry in the list of dxgsharedresource in dxgadapter */ -+ /* Protected by dxgadapter::shared_resource_list_lock */ -+ struct list_head shared_resource_list_entry; -+ struct mutex fd_mutex; -+ /* Referenced by file descriptors */ -+ int host_shared_handle_nt_reference; -+ /* Corresponding global handle in the host */ -+ struct d3dkmthandle host_shared_handle; -+ /* -+ * When the sync object is shared by NT handle, this is the -+ * corresponding handle in the host -+ */ -+ struct d3dkmthandle host_shared_handle_nt; -+ /* Values below are computed when the resource is sealed */ -+ u32 runtime_private_data_size; -+ u32 alloc_private_data_size; -+ u32 resource_private_data_size; -+ u32 allocation_count; -+ union { -+ struct { -+ /* Cannot add new allocations */ -+ u32 sealed:1; -+ u32 reserved:31; -+ }; -+ long flags; -+ }; -+ u32 *alloc_private_data_sizes; -+ u8 *alloc_private_data; -+ u8 *runtime_private_data; -+ u8 *resource_private_data; -+}; -+ -+struct dxgsharedresource *dxgsharedresource_create(struct dxgadapter *adapter); -+void dxgsharedresource_destroy(struct kref *refcount); -+void dxgsharedresource_add_resource(struct dxgsharedresource *sres, -+ struct dxgresource *res); -+void dxgsharedresource_remove_resource(struct dxgsharedresource *sres, -+ 
struct dxgresource *res); -+ - struct dxgresource { - struct kref resource_kref; - enum dxgobjectstate object_state; -@@ -504,6 +567,8 @@ struct dxgresource { - }; - long flags; - }; -+ /* Owner of the shared resource */ -+ struct dxgsharedresource *shared_owner; - }; - - struct dxgresource *dxgresource_create(struct dxgdevice *dev); -@@ -658,6 +723,18 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -+int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process, -+ struct d3dkmthandle object, -+ struct d3dkmthandle *shared_handle); -+int dxgvmb_send_destroy_nt_shared_object(struct d3dkmthandle shared_handle); -+int dxgvmb_send_open_resource(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle device, -+ struct d3dkmthandle global_share, -+ u32 allocation_count, -+ u32 total_priv_drv_data_size, -+ struct d3dkmthandle *resource_handle, -+ struct d3dkmthandle *alloc_handles); - int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - enum d3dkmdt_standardallocationtype t, - struct d3dkmdt_gdisurfacedata *data, -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -258,6 +258,7 @@ int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, - init_rwsem(&adapter->core_lock); - - INIT_LIST_HEAD(&adapter->adapter_process_list_head); -+ INIT_LIST_HEAD(&adapter->shared_resource_list_head); - INIT_LIST_HEAD(&adapter->syncobj_list_head); - init_rwsem(&adapter->shared_resource_list_lock); - adapter->pci_dev = dev; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -712,6 +712,79 @@ int 
dxgvmb_send_destroy_process(struct d3dkmthandle process) - return ret; - } - -+int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process, -+ struct d3dkmthandle object, -+ struct d3dkmthandle *shared_handle) -+{ -+ struct dxgkvmb_command_createntsharedobject *command; -+ int ret; -+ struct dxgvmbusmsg msg; -+ -+ ret = init_message(&msg, NULL, process, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vm_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CREATENTSHAREDOBJECT, -+ process->host_handle); -+ command->object = object; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = dxgvmb_send_sync_msg(dxgglobal_get_dxgvmbuschannel(), -+ msg.hdr, msg.size, shared_handle, -+ sizeof(*shared_handle)); -+ -+ dxgglobal_release_channel_lock(); -+ -+ if (ret < 0) -+ goto cleanup; -+ if (shared_handle->v == 0) { -+ DXG_ERR("failed to create NT shared object"); -+ ret = -ENOTRECOVERABLE; -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_destroy_nt_shared_object(struct d3dkmthandle shared_handle) -+{ -+ struct dxgkvmb_command_destroyntsharedobject *command; -+ int ret; -+ struct dxgvmbusmsg msg; -+ -+ ret = init_message(&msg, NULL, NULL, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vm_to_host_init1(&command->hdr, -+ DXGK_VMBCOMMAND_DESTROYNTSHAREDOBJECT); -+ command->shared_handle = shared_handle; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(dxgglobal_get_dxgvmbuschannel(), -+ msg.hdr, msg.size); -+ -+ dxgglobal_release_channel_lock(); -+ -+cleanup: -+ free_message(&msg, NULL); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_destroy_sync_object(struct dxgprocess *process, - struct d3dkmthandle sync_object) - { -@@ -1552,6 +1625,60 @@ int 
dxgvmb_send_destroy_allocation(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_open_resource(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle device, -+ struct d3dkmthandle global_share, -+ u32 allocation_count, -+ u32 total_priv_drv_data_size, -+ struct d3dkmthandle *resource_handle, -+ struct d3dkmthandle *alloc_handles) -+{ -+ struct dxgkvmb_command_openresource *command; -+ struct dxgkvmb_command_openresource_return *result; -+ struct d3dkmthandle *handles; -+ int ret; -+ int i; -+ u32 result_size = allocation_count * sizeof(struct d3dkmthandle) + -+ sizeof(*result); -+ struct dxgvmbusmsgres msg = {.hdr = NULL}; -+ -+ ret = init_message_res(&msg, adapter, process, sizeof(*command), -+ result_size); -+ if (ret) -+ goto cleanup; -+ command = msg.msg; -+ result = msg.res; -+ -+ command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_OPENRESOURCE, -+ process->host_handle); -+ command->device = device; -+ command->nt_security_sharing = 1; -+ command->global_share = global_share; -+ command->allocation_count = allocation_count; -+ command->total_priv_drv_data_size = total_priv_drv_data_size; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ result, msg.res_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result->status); -+ if (ret < 0) -+ goto cleanup; -+ -+ *resource_handle = result->resource; -+ handles = (struct d3dkmthandle *) &result[1]; -+ for (i = 0; i < allocation_count; i++) -+ alloc_handles[i] = handles[i]; -+ -+cleanup: -+ free_message((struct dxgvmbusmsg *)&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - enum d3dkmdt_standardallocationtype alloctype, - struct d3dkmdt_gdisurfacedata *alloc_data, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ 
b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -172,6 +172,21 @@ struct dxgkvmb_command_signalguestevent { - bool dereference_event; - }; - -+/* -+ * The command returns struct d3dkmthandle of a shared object for the -+ * given pre-process object -+ */ -+struct dxgkvmb_command_createntsharedobject { -+ struct dxgkvmb_command_vm_to_host hdr; -+ struct d3dkmthandle object; -+}; -+ -+/* The command returns ntstatus */ -+struct dxgkvmb_command_destroyntsharedobject { -+ struct dxgkvmb_command_vm_to_host hdr; -+ struct d3dkmthandle shared_handle; -+}; -+ - /* Returns ntstatus */ - struct dxgkvmb_command_setiospaceregion { - struct dxgkvmb_command_vm_to_host hdr; -@@ -305,6 +320,21 @@ struct dxgkvmb_command_createallocation { - /* u8 priv_drv_data[] for each alloc_info */ - }; - -+struct dxgkvmb_command_openresource { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ bool nt_security_sharing; -+ struct d3dkmthandle global_share; -+ u32 allocation_count; -+ u32 total_priv_drv_data_size; -+}; -+ -+struct dxgkvmb_command_openresource_return { -+ struct d3dkmthandle resource; -+ struct ntstatus status; -+/* struct d3dkmthandle allocation[allocation_count]; */ -+}; -+ - struct dxgkvmb_command_getstandardallocprivdata { - struct dxgkvmb_command_vgpu_to_host hdr; - enum d3dkmdt_standardallocationtype alloc_type; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -36,8 +36,35 @@ static char *errorstr(int ret) - } - #endif - -+static int dxgsharedresource_release(struct inode *inode, struct file *file) -+{ -+ struct dxgsharedresource *resource = file->private_data; -+ -+ DXG_TRACE("Release resource: %p", resource); -+ mutex_lock(&resource->fd_mutex); -+ kref_get(&resource->sresource_kref); -+ resource->host_shared_handle_nt_reference--; -+ if (resource->host_shared_handle_nt_reference == 0) { -+ if (resource->host_shared_handle_nt.v) 
{ -+ dxgvmb_send_destroy_nt_shared_object( -+ resource->host_shared_handle_nt); -+ DXG_TRACE("Resource host_handle_nt destroyed: %x", -+ resource->host_shared_handle_nt.v); -+ resource->host_shared_handle_nt.v = 0; -+ } -+ kref_put(&resource->sresource_kref, dxgsharedresource_destroy); -+ } -+ mutex_unlock(&resource->fd_mutex); -+ kref_put(&resource->sresource_kref, dxgsharedresource_destroy); -+ return 0; -+} -+ -+static const struct file_operations dxg_resource_fops = { -+ .release = dxgsharedresource_release, -+}; -+ - static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, -- void *__user inargs) -+ void *__user inargs) - { - struct d3dkmt_openadapterfromluid args; - int ret; -@@ -212,6 +239,98 @@ dxgkp_enum_adapters(struct dxgprocess *process, - return ret; - } - -+static int dxgsharedresource_seal(struct dxgsharedresource *shared_resource) -+{ -+ int ret = 0; -+ int i = 0; -+ u8 *private_data; -+ u32 data_size; -+ struct dxgresource *resource; -+ struct dxgallocation *alloc; -+ -+ DXG_TRACE("Sealing resource: %p", shared_resource); -+ -+ down_write(&shared_resource->adapter->shared_resource_list_lock); -+ if (shared_resource->sealed) { -+ DXG_TRACE("Resource already sealed"); -+ goto cleanup; -+ } -+ shared_resource->sealed = 1; -+ if (!list_empty(&shared_resource->resource_list_head)) { -+ resource = -+ list_first_entry(&shared_resource->resource_list_head, -+ struct dxgresource, -+ shared_resource_list_entry); -+ DXG_TRACE("First resource: %p", resource); -+ mutex_lock(&resource->resource_mutex); -+ list_for_each_entry(alloc, &resource->alloc_list_head, -+ alloc_list_entry) { -+ DXG_TRACE("Resource alloc: %p %d", alloc, -+ alloc->priv_drv_data->data_size); -+ shared_resource->allocation_count++; -+ shared_resource->alloc_private_data_size += -+ alloc->priv_drv_data->data_size; -+ if (shared_resource->alloc_private_data_size < -+ alloc->priv_drv_data->data_size) { -+ DXG_ERR("alloc private data overflow"); -+ ret = -EINVAL; -+ goto cleanup1; -+ 
} -+ } -+ if (shared_resource->alloc_private_data_size == 0) { -+ ret = -EINVAL; -+ goto cleanup1; -+ } -+ shared_resource->alloc_private_data = -+ vzalloc(shared_resource->alloc_private_data_size); -+ if (shared_resource->alloc_private_data == NULL) { -+ ret = -EINVAL; -+ goto cleanup1; -+ } -+ shared_resource->alloc_private_data_sizes = -+ vzalloc(sizeof(u32)*shared_resource->allocation_count); -+ if (shared_resource->alloc_private_data_sizes == NULL) { -+ ret = -EINVAL; -+ goto cleanup1; -+ } -+ private_data = shared_resource->alloc_private_data; -+ data_size = shared_resource->alloc_private_data_size; -+ i = 0; -+ list_for_each_entry(alloc, &resource->alloc_list_head, -+ alloc_list_entry) { -+ u32 alloc_data_size = alloc->priv_drv_data->data_size; -+ -+ if (alloc_data_size) { -+ if (data_size < alloc_data_size) { -+ dev_err(DXGDEV, -+ "Invalid private data size"); -+ ret = -EINVAL; -+ goto cleanup1; -+ } -+ shared_resource->alloc_private_data_sizes[i] = -+ alloc_data_size; -+ memcpy(private_data, -+ alloc->priv_drv_data->data, -+ alloc_data_size); -+ vfree(alloc->priv_drv_data); -+ alloc->priv_drv_data = NULL; -+ private_data += alloc_data_size; -+ data_size -= alloc_data_size; -+ } -+ i++; -+ } -+ if (data_size != 0) { -+ DXG_ERR("Data size mismatch"); -+ ret = -EINVAL; -+ } -+cleanup1: -+ mutex_unlock(&resource->resource_mutex); -+ } -+cleanup: -+ up_write(&shared_resource->adapter->shared_resource_list_lock); -+ return ret; -+} -+ - static int - dxgkio_enum_adapters(struct dxgprocess *process, void *__user inargs) - { -@@ -803,6 +922,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - u32 alloc_info_size = 0; - struct dxgresource *resource = NULL; - struct dxgallocation **dxgalloc = NULL; -+ struct dxgsharedresource *shared_resource = NULL; - bool resource_mutex_acquired = false; - u32 standard_alloc_priv_data_size = 0; - void *standard_alloc_priv_data = NULL; -@@ -973,6 +1093,76 @@ dxgkio_create_allocation(struct dxgprocess 
*process, void *__user inargs) - } - resource->private_runtime_handle = - args.private_runtime_resource_handle; -+ if (args.flags.create_shared) { -+ if (!args.flags.nt_security_sharing) { -+ dev_err(DXGDEV, -+ "nt_security_sharing must be set"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ shared_resource = dxgsharedresource_create(adapter); -+ if (shared_resource == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ shared_resource->runtime_private_data_size = -+ args.priv_drv_data_size; -+ shared_resource->resource_private_data_size = -+ args.priv_drv_data_size; -+ -+ shared_resource->runtime_private_data_size = -+ args.private_runtime_data_size; -+ shared_resource->resource_private_data_size = -+ args.priv_drv_data_size; -+ dxgsharedresource_add_resource(shared_resource, -+ resource); -+ if (args.flags.standard_allocation) { -+ shared_resource->resource_private_data = -+ res_priv_data; -+ shared_resource->resource_private_data_size = -+ res_priv_data_size; -+ res_priv_data = NULL; -+ } -+ if (args.private_runtime_data_size) { -+ shared_resource->runtime_private_data = -+ vzalloc(args.private_runtime_data_size); -+ if (shared_resource->runtime_private_data == -+ NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user( -+ shared_resource->runtime_private_data, -+ args.private_runtime_data, -+ args.private_runtime_data_size); -+ if (ret) { -+ dev_err(DXGDEV, -+ "failed to copy runtime data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ if (args.priv_drv_data_size && -+ !args.flags.standard_allocation) { -+ shared_resource->resource_private_data = -+ vzalloc(args.priv_drv_data_size); -+ if (shared_resource->resource_private_data == -+ NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user( -+ shared_resource->resource_private_data, -+ args.priv_drv_data, -+ args.priv_drv_data_size); -+ if (ret) { -+ dev_err(DXGDEV, -+ "failed to copy res data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ } - } else { - if (args.resource.v) { 
- /* Adding new allocations to the given resource */ -@@ -991,6 +1181,12 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - ret = -EINVAL; - goto cleanup; - } -+ if (resource->shared_owner && -+ resource->shared_owner->sealed) { -+ DXG_ERR("Resource is sealed"); -+ ret = -EINVAL; -+ goto cleanup; -+ } - /* Synchronize with resource destruction */ - mutex_lock(&resource->resource_mutex); - if (!dxgresource_is_active(resource)) { -@@ -1092,9 +1288,16 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - } - } - if (resource && args.flags.create_resource) { -+ if (shared_resource) { -+ dxgsharedresource_remove_resource -+ (shared_resource, resource); -+ } - dxgresource_destroy(resource); - } - } -+ if (shared_resource) -+ kref_put(&shared_resource->sresource_kref, -+ dxgsharedresource_destroy); - if (dxgalloc) - vfree(dxgalloc); - if (standard_alloc_priv_data) -@@ -1140,6 +1343,10 @@ static int validate_alloc(struct dxgallocation *alloc0, - fail_reason = 4; - goto cleanup; - } -+ if (alloc->owner.resource->shared_owner) { -+ fail_reason = 5; -+ goto cleanup; -+ } - } else { - if (alloc->owner.device != device) { - fail_reason = 6; -@@ -2146,6 +2353,582 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgsharedresource_get_host_nt_handle(struct dxgsharedresource *resource, -+ struct dxgprocess *process, -+ struct d3dkmthandle objecthandle) -+{ -+ int ret = 0; -+ -+ mutex_lock(&resource->fd_mutex); -+ if (resource->host_shared_handle_nt_reference == 0) { -+ ret = dxgvmb_send_create_nt_shared_object(process, -+ objecthandle, -+ &resource->host_shared_handle_nt); -+ if (ret < 0) -+ goto cleanup; -+ DXG_TRACE("Resource host_shared_handle_ht: %x", -+ resource->host_shared_handle_nt.v); -+ kref_get(&resource->sresource_kref); -+ } -+ resource->host_shared_handle_nt_reference++; -+cleanup: -+ mutex_unlock(&resource->fd_mutex); -+ return ret; -+} -+ -+enum 
dxg_sharedobject_type { -+ DXG_SHARED_RESOURCE -+}; -+ -+static int get_object_fd(enum dxg_sharedobject_type type, -+ void *object, int *fdout) -+{ -+ struct file *file; -+ int fd; -+ -+ fd = get_unused_fd_flags(O_CLOEXEC); -+ if (fd < 0) { -+ DXG_ERR("get_unused_fd_flags failed: %x", fd); -+ return -ENOTRECOVERABLE; -+ } -+ -+ switch (type) { -+ case DXG_SHARED_RESOURCE: -+ file = anon_inode_getfile("dxgresource", -+ &dxg_resource_fops, object, 0); -+ break; -+ default: -+ return -EINVAL; -+ }; -+ if (IS_ERR(file)) { -+ DXG_ERR("anon_inode_getfile failed: %x", fd); -+ put_unused_fd(fd); -+ return -ENOTRECOVERABLE; -+ } -+ -+ fd_install(fd, file); -+ *fdout = fd; -+ return 0; -+} -+ -+static int -+dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_shareobjects args; -+ enum hmgrentry_type object_type; -+ struct dxgsyncobject *syncobj = NULL; -+ struct dxgresource *resource = NULL; -+ struct dxgsharedresource *shared_resource = NULL; -+ struct d3dkmthandle *handles = NULL; -+ int object_fd = -1; -+ void *obj = NULL; -+ u32 handle_size; -+ int ret; -+ u64 tmp = 0; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.object_count == 0 || args.object_count > 1) { -+ DXG_ERR("invalid object count %d", args.object_count); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ handle_size = args.object_count * sizeof(struct d3dkmthandle); -+ -+ handles = vzalloc(handle_size); -+ if (handles == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(handles, args.objects, handle_size); -+ if (ret) { -+ DXG_ERR("failed to copy object handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ DXG_TRACE("Sharing handle: %x", handles[0].v); -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -+ object_type = hmgrtable_get_object_type(&process->handle_table, -+ handles[0]); -+ obj = 
hmgrtable_get_object(&process->handle_table, handles[0]); -+ if (obj == NULL) { -+ DXG_ERR("invalid object handle %x", handles[0].v); -+ ret = -EINVAL; -+ } else { -+ switch (object_type) { -+ case HMGRENTRY_TYPE_DXGRESOURCE: -+ resource = obj; -+ if (resource->shared_owner) { -+ kref_get(&resource->resource_kref); -+ shared_resource = resource->shared_owner; -+ } else { -+ resource = NULL; -+ DXG_ERR("resource object shared"); -+ ret = -EINVAL; -+ } -+ break; -+ default: -+ DXG_ERR("invalid object type %d", object_type); -+ ret = -EINVAL; -+ break; -+ } -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED); -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ switch (object_type) { -+ case HMGRENTRY_TYPE_DXGRESOURCE: -+ ret = get_object_fd(DXG_SHARED_RESOURCE, shared_resource, -+ &object_fd); -+ if (ret < 0) { -+ DXG_ERR("get_object_fd failed for resource"); -+ goto cleanup; -+ } -+ ret = dxgsharedresource_get_host_nt_handle(shared_resource, -+ process, handles[0]); -+ if (ret < 0) { -+ DXG_ERR("get_host_res_nt_handle failed"); -+ goto cleanup; -+ } -+ ret = dxgsharedresource_seal(shared_resource); -+ if (ret < 0) { -+ DXG_ERR("dxgsharedresource_seal failed"); -+ goto cleanup; -+ } -+ break; -+ default: -+ ret = -EINVAL; -+ break; -+ } -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ DXG_TRACE("Object FD: %x", object_fd); -+ -+ tmp = (u64) object_fd; -+ -+ ret = copy_to_user(args.shared_handle, &tmp, sizeof(u64)); -+ if (ret < 0) -+ DXG_ERR("failed to copy shared handle"); -+ -+cleanup: -+ if (ret < 0) { -+ if (object_fd >= 0) -+ put_unused_fd(object_fd); -+ } -+ -+ if (handles) -+ vfree(handles); -+ -+ if (syncobj) -+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release); -+ -+ if (resource) -+ kref_put(&resource->resource_kref, dxgresource_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_query_resource_info_nt(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_queryresourceinfofromnthandle 
args; -+ int ret; -+ struct dxgdevice *device = NULL; -+ struct dxgsharedresource *shared_resource = NULL; -+ struct file *file = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ file = fget(args.nt_handle); -+ if (!file) { -+ DXG_ERR("failed to get file from handle: %llx", -+ args.nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (file->f_op != &dxg_resource_fops) { -+ DXG_ERR("invalid fd: %llx", args.nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ shared_resource = file->private_data; -+ if (shared_resource == NULL) { -+ DXG_ERR("invalid private data: %llx", args.nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgsharedresource_seal(shared_resource); -+ if (ret < 0) -+ goto cleanup; -+ -+ args.private_runtime_data_size = -+ shared_resource->runtime_private_data_size; -+ args.resource_priv_drv_data_size = -+ shared_resource->resource_private_data_size; -+ args.allocation_count = shared_resource->allocation_count; -+ args.total_priv_drv_data_size = -+ shared_resource->alloc_private_data_size; -+ -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (file) -+ fput(file); -+ if (device) -+ dxgdevice_release_lock_shared(device); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+assign_resource_handles(struct dxgprocess *process, -+ struct dxgsharedresource *shared_resource, -+ struct d3dkmt_openresourcefromnthandle *args, -+ 
struct d3dkmthandle resource_handle, -+ struct dxgresource *resource, -+ struct dxgallocation **allocs, -+ struct d3dkmthandle *handles) -+{ -+ int ret; -+ int i; -+ u8 *cur_priv_data; -+ u32 total_priv_data_size = 0; -+ struct d3dddi_openallocationinfo2 open_alloc_info = { }; -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, resource, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ resource_handle); -+ if (ret < 0) -+ goto cleanup; -+ resource->handle = resource_handle; -+ resource->handle_valid = 1; -+ cur_priv_data = args->total_priv_drv_data; -+ for (i = 0; i < args->allocation_count; i++) { -+ ret = hmgrtable_assign_handle(&process->handle_table, allocs[i], -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ handles[i]); -+ if (ret < 0) -+ goto cleanup; -+ allocs[i]->alloc_handle = handles[i]; -+ allocs[i]->handle_valid = 1; -+ open_alloc_info.allocation = handles[i]; -+ if (shared_resource->alloc_private_data_sizes) -+ open_alloc_info.priv_drv_data_size = -+ shared_resource->alloc_private_data_sizes[i]; -+ else -+ open_alloc_info.priv_drv_data_size = 0; -+ -+ total_priv_data_size += open_alloc_info.priv_drv_data_size; -+ open_alloc_info.priv_drv_data = cur_priv_data; -+ cur_priv_data += open_alloc_info.priv_drv_data_size; -+ -+ ret = copy_to_user(&args->open_alloc_info[i], -+ &open_alloc_info, -+ sizeof(open_alloc_info)); -+ if (ret) { -+ DXG_ERR("failed to copy alloc info"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ args->total_priv_drv_data_size = total_priv_data_size; -+cleanup: -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ if (ret < 0) { -+ for (i = 0; i < args->allocation_count; i++) -+ dxgallocation_free_handle(allocs[i]); -+ dxgresource_free_handle(resource); -+ } -+ return ret; -+} -+ -+static int -+open_resource(struct dxgprocess *process, -+ struct d3dkmt_openresourcefromnthandle *args, -+ __user struct d3dkmthandle *res_out, -+ __user u32 *total_driver_data_size_out) -+{ -+ int ret = 0; -+ 
int i; -+ struct d3dkmthandle *alloc_handles = NULL; -+ int alloc_handles_size = sizeof(struct d3dkmthandle) * -+ args->allocation_count; -+ struct dxgsharedresource *shared_resource = NULL; -+ struct dxgresource *resource = NULL; -+ struct dxgallocation **allocs = NULL; -+ struct d3dkmthandle global_share = {}; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dkmthandle resource_handle = {}; -+ struct file *file = NULL; -+ -+ DXG_TRACE("Opening resource handle: %llx", args->nt_handle); -+ -+ file = fget(args->nt_handle); -+ if (!file) { -+ DXG_ERR("failed to get file from handle: %llx", -+ args->nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (file->f_op != &dxg_resource_fops) { -+ DXG_ERR("invalid fd type: %llx", args->nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ shared_resource = file->private_data; -+ if (shared_resource == NULL) { -+ DXG_ERR("invalid private data: %llx", args->nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (kref_get_unless_zero(&shared_resource->sresource_kref) == 0) -+ shared_resource = NULL; -+ else -+ global_share = shared_resource->host_shared_handle_nt; -+ -+ if (shared_resource == NULL) { -+ DXG_ERR("Invalid shared resource handle: %x", -+ (u32)args->nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ DXG_TRACE("Shared resource: %p %x", shared_resource, -+ global_share.v); -+ -+ device = dxgprocess_device_by_handle(process, args->device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgsharedresource_seal(shared_resource); -+ if (ret < 0) -+ goto cleanup; -+ -+ if (args->allocation_count != shared_resource->allocation_count || -+ 
args->private_runtime_data_size < -+ shared_resource->runtime_private_data_size || -+ args->resource_priv_drv_data_size < -+ shared_resource->resource_private_data_size || -+ args->total_priv_drv_data_size < -+ shared_resource->alloc_private_data_size) { -+ ret = -EINVAL; -+ DXG_ERR("Invalid data sizes"); -+ goto cleanup; -+ } -+ -+ alloc_handles = vzalloc(alloc_handles_size); -+ if (alloc_handles == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ allocs = vzalloc(sizeof(void *) * args->allocation_count); -+ if (allocs == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ resource = dxgresource_create(device); -+ if (resource == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ dxgsharedresource_add_resource(shared_resource, resource); -+ -+ for (i = 0; i < args->allocation_count; i++) { -+ allocs[i] = dxgallocation_create(process); -+ if (allocs[i] == NULL) -+ goto cleanup; -+ ret = dxgresource_add_alloc(resource, allocs[i]); -+ if (ret < 0) -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_open_resource(process, adapter, -+ device->handle, global_share, -+ args->allocation_count, -+ args->total_priv_drv_data_size, -+ &resource_handle, alloc_handles); -+ if (ret < 0) { -+ DXG_ERR("dxgvmb_send_open_resource failed"); -+ goto cleanup; -+ } -+ -+ if (shared_resource->runtime_private_data_size) { -+ ret = copy_to_user(args->private_runtime_data, -+ shared_resource->runtime_private_data, -+ shared_resource->runtime_private_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy runtime data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ if (shared_resource->resource_private_data_size) { -+ ret = copy_to_user(args->resource_priv_drv_data, -+ shared_resource->resource_private_data, -+ shared_resource->resource_private_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy resource data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ if (shared_resource->alloc_private_data_size) { -+ ret = copy_to_user(args->total_priv_drv_data, -+ 
shared_resource->alloc_private_data, -+ shared_resource->alloc_private_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ ret = assign_resource_handles(process, shared_resource, args, -+ resource_handle, resource, allocs, -+ alloc_handles); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(res_out, &resource_handle, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy resource handle to user"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = copy_to_user(total_driver_data_size_out, -+ &args->total_priv_drv_data_size, sizeof(u32)); -+ if (ret) { -+ DXG_ERR("failed to copy total driver data size"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (resource_handle.v) { -+ struct d3dkmt_destroyallocation2 tmp = { }; -+ -+ tmp.flags.assume_not_in_use = 1; -+ tmp.device = args->device; -+ tmp.resource = resource_handle; -+ ret = dxgvmb_send_destroy_allocation(process, device, -+ &tmp, NULL); -+ } -+ if (resource) -+ dxgresource_destroy(resource); -+ } -+ -+ if (file) -+ fput(file); -+ if (allocs) -+ vfree(allocs); -+ if (shared_resource) -+ kref_put(&shared_resource->sresource_kref, -+ dxgsharedresource_destroy); -+ if (alloc_handles) -+ vfree(alloc_handles); -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ dxgdevice_release_lock_shared(device); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ return ret; -+} -+ -+static int -+dxgkio_open_resource_nt(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_openresourcefromnthandle args; -+ struct d3dkmt_openresourcefromnthandle *__user args_user = inargs; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = open_resource(process, &args, -+ &args_user->resource, -+ &args_user->total_priv_drv_data_size); -+ -+cleanup: 
-+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, -@@ -2215,10 +2998,11 @@ static struct ioctl_desc ioctls[] = { - /* 0x3c */ {}, - /* 0x3d */ {}, - /* 0x3e */ {dxgkio_enum_adapters3, LX_DXENUMADAPTERS3}, --/* 0x3f */ {}, -+/* 0x3f */ {dxgkio_share_objects, LX_DXSHAREOBJECTS}, - /* 0x40 */ {}, --/* 0x41 */ {}, --/* 0x42 */ {}, -+/* 0x41 */ {dxgkio_query_resource_info_nt, -+ LX_DXQUERYRESOURCEINFOFROMNTHANDLE}, -+/* 0x42 */ {dxgkio_open_resource_nt, LX_DXOPENRESOURCEFROMNTHANDLE}, - /* 0x43 */ {}, - /* 0x44 */ {}, - /* 0x45 */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -682,6 +682,94 @@ enum d3dkmt_deviceexecution_state { - _D3DKMT_DEVICEEXECUTION_ERROR_DMAPAGEFAULT = 7, - }; - -+struct d3dddi_openallocationinfo2 { -+ struct d3dkmthandle allocation; -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ __u32 priv_drv_data_size; -+ __u64 gpu_va; -+ __u64 reserved[6]; -+}; -+ -+struct d3dkmt_openresourcefromnthandle { -+ struct d3dkmthandle device; -+ __u32 reserved; -+ __u64 nt_handle; -+ __u32 allocation_count; -+ __u32 reserved1; -+#ifdef __KERNEL__ -+ struct d3dddi_openallocationinfo2 *open_alloc_info; -+#else -+ __u64 open_alloc_info; -+#endif -+ int private_runtime_data_size; -+ __u32 reserved2; -+#ifdef __KERNEL__ -+ void *private_runtime_data; -+#else -+ __u64 private_runtime_data; -+#endif -+ __u32 resource_priv_drv_data_size; -+ __u32 reserved3; -+#ifdef __KERNEL__ -+ void *resource_priv_drv_data; -+#else -+ __u64 resource_priv_drv_data; -+#endif -+ __u32 total_priv_drv_data_size; -+#ifdef __KERNEL__ -+ void *total_priv_drv_data; -+#else -+ __u64 total_priv_drv_data; -+#endif -+ struct d3dkmthandle resource; -+ struct 
d3dkmthandle keyed_mutex; -+#ifdef __KERNEL__ -+ void *keyed_mutex_private_data; -+#else -+ __u64 keyed_mutex_private_data; -+#endif -+ __u32 keyed_mutex_private_data_size; -+ struct d3dkmthandle sync_object; -+}; -+ -+struct d3dkmt_queryresourceinfofromnthandle { -+ struct d3dkmthandle device; -+ __u32 reserved; -+ __u64 nt_handle; -+#ifdef __KERNEL__ -+ void *private_runtime_data; -+#else -+ __u64 private_runtime_data; -+#endif -+ __u32 private_runtime_data_size; -+ __u32 total_priv_drv_data_size; -+ __u32 resource_priv_drv_data_size; -+ __u32 allocation_count; -+}; -+ -+struct d3dkmt_shareobjects { -+ __u32 object_count; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ const struct d3dkmthandle *objects; -+ void *object_attr; /* security attributes */ -+#else -+ __u64 objects; -+ __u64 object_attr; -+#endif -+ __u32 desired_access; -+ __u32 reserved1; -+#ifdef __KERNEL__ -+ __u64 *shared_handle; /* output file descriptors */ -+#else -+ __u64 shared_handle; -+#endif -+}; -+ - union d3dkmt_enumadapters_filter { - struct { - __u64 include_compute_only:1; -@@ -747,5 +835,13 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x3b, struct d3dkmt_waitforsynchronizationobjectfromgpu) - #define LX_DXENUMADAPTERS3 \ - _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) -+#define LX_DXSHAREOBJECTS \ -+ _IOWR(0x47, 0x3f, struct d3dkmt_shareobjects) -+#define LX_DXOPENSYNCOBJECTFROMNTHANDLE2 \ -+ _IOWR(0x47, 0x40, struct d3dkmt_opensyncobjectfromnthandle2) -+#define LX_DXQUERYRESOURCEINFOFROMNTHANDLE \ -+ _IOWR(0x47, 0x41, struct d3dkmt_queryresourceinfofromnthandle) -+#define LX_DXOPENRESOURCEFROMNTHANDLE \ -+ _IOWR(0x47, 0x42, struct d3dkmt_openresourcefromnthandle) - - #endif /* _D3DKMTHK_H */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1680-drivers-hv-dxgkrnl-Sharing-of-sync-objects.patch b/patch/kernel/archive/wsl2-arm64-6.1/1680-drivers-hv-dxgkrnl-Sharing-of-sync-objects.patch deleted file mode 100644 index 8e9148855001..000000000000 --- 
a/patch/kernel/archive/wsl2-arm64-6.1/1680-drivers-hv-dxgkrnl-Sharing-of-sync-objects.patch +++ /dev/null @@ -1,1555 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Mon, 31 Jan 2022 16:41:28 -0800 -Subject: drivers: hv: dxgkrnl: Sharing of sync objects - -Implement creation of a shared sync objects and the ioctl for sharing -dxgsyncobject objects between processes in the virtual machine. - -Sync objects are shared using file descriptor (FD) handles. -The name "NT handle" is used to be compatible with Windows implementation. - -An FD handle is created by the LX_DXSHAREOBJECTS ioctl. The created FD -handle could be sent to another process using any Linux API. - -To use a shared sync object in other ioctls, the object needs to be -opened using its FD handle. A sync object is opened by the -LX_DXOPENSYNCOBJECTFROMNTHANDLE2 ioctl, which returns a d3dkmthandle -value. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 181 ++- - drivers/hv/dxgkrnl/dxgkrnl.h | 96 ++ - drivers/hv/dxgkrnl/dxgmodule.c | 1 + - drivers/hv/dxgkrnl/dxgprocess.c | 4 + - drivers/hv/dxgkrnl/dxgvmbus.c | 221 ++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 35 + - drivers/hv/dxgkrnl/ioctl.c | 556 +++++++++- - include/uapi/misc/d3dkmthk.h | 93 ++ - 8 files changed, 1181 insertions(+), 6 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -171,6 +171,26 @@ void dxgadapter_remove_shared_resource(struct dxgadapter *adapter, - up_write(&adapter->shared_resource_list_lock); - } - -+void dxgadapter_add_shared_syncobj(struct dxgadapter *adapter, -+ struct dxgsharedsyncobject *object) -+{ -+ down_write(&adapter->shared_resource_list_lock); -+ list_add_tail(&object->adapter_shared_syncobj_list_entry, -+ 
&adapter->adapter_shared_syncobj_list_head); -+ up_write(&adapter->shared_resource_list_lock); -+} -+ -+void dxgadapter_remove_shared_syncobj(struct dxgadapter *adapter, -+ struct dxgsharedsyncobject *object) -+{ -+ down_write(&adapter->shared_resource_list_lock); -+ if (object->adapter_shared_syncobj_list_entry.next) { -+ list_del(&object->adapter_shared_syncobj_list_entry); -+ object->adapter_shared_syncobj_list_entry.next = NULL; -+ } -+ up_write(&adapter->shared_resource_list_lock); -+} -+ - void dxgadapter_add_syncobj(struct dxgadapter *adapter, - struct dxgsyncobject *object) - { -@@ -622,7 +642,7 @@ void dxgresource_destroy(struct dxgresource *resource) - dxgallocation_destroy(alloc); - } - dxgdevice_remove_resource(device, resource); -- shared_resource = resource->shared_owner; -+ shared_resource = resource->shared_owner; - if (shared_resource) { - dxgsharedresource_remove_resource(shared_resource, - resource); -@@ -736,6 +756,9 @@ struct dxgcontext *dxgcontext_create(struct dxgdevice *device) - */ - void dxgcontext_destroy(struct dxgprocess *process, struct dxgcontext *context) - { -+ struct dxghwqueue *hwqueue; -+ struct dxghwqueue *tmp; -+ - DXG_TRACE("Destroying context %p", context); - context->object_state = DXGOBJECTSTATE_DESTROYED; - if (context->device) { -@@ -747,6 +770,10 @@ void dxgcontext_destroy(struct dxgprocess *process, struct dxgcontext *context) - dxgdevice_remove_context(context->device, context); - kref_put(&context->device->device_kref, dxgdevice_release); - } -+ list_for_each_entry_safe(hwqueue, tmp, &context->hwqueue_list_head, -+ hwqueue_list_entry) { -+ dxghwqueue_destroy(process, hwqueue); -+ } - kref_put(&context->context_kref, dxgcontext_release); - } - -@@ -773,6 +800,38 @@ void dxgcontext_release(struct kref *refcount) - kfree(context); - } - -+int dxgcontext_add_hwqueue(struct dxgcontext *context, -+ struct dxghwqueue *hwqueue) -+{ -+ int ret = 0; -+ -+ down_write(&context->hwqueue_list_lock); -+ if 
(dxgcontext_is_active(context)) -+ list_add_tail(&hwqueue->hwqueue_list_entry, -+ &context->hwqueue_list_head); -+ else -+ ret = -ENODEV; -+ up_write(&context->hwqueue_list_lock); -+ return ret; -+} -+ -+void dxgcontext_remove_hwqueue(struct dxgcontext *context, -+ struct dxghwqueue *hwqueue) -+{ -+ if (hwqueue->hwqueue_list_entry.next) { -+ list_del(&hwqueue->hwqueue_list_entry); -+ hwqueue->hwqueue_list_entry.next = NULL; -+ } -+} -+ -+void dxgcontext_remove_hwqueue_safe(struct dxgcontext *context, -+ struct dxghwqueue *hwqueue) -+{ -+ down_write(&context->hwqueue_list_lock); -+ dxgcontext_remove_hwqueue(context, hwqueue); -+ up_write(&context->hwqueue_list_lock); -+} -+ - struct dxgallocation *dxgallocation_create(struct dxgprocess *process) - { - struct dxgallocation *alloc; -@@ -958,6 +1017,63 @@ void dxgprocess_adapter_remove_device(struct dxgdevice *device) - mutex_unlock(&device->adapter_info->device_list_mutex); - } - -+struct dxgsharedsyncobject *dxgsharedsyncobj_create(struct dxgadapter *adapter, -+ struct dxgsyncobject *so) -+{ -+ struct dxgsharedsyncobject *syncobj; -+ -+ syncobj = kzalloc(sizeof(*syncobj), GFP_KERNEL); -+ if (syncobj) { -+ kref_init(&syncobj->ssyncobj_kref); -+ INIT_LIST_HEAD(&syncobj->shared_syncobj_list_head); -+ syncobj->adapter = adapter; -+ syncobj->type = so->type; -+ syncobj->monitored_fence = so->monitored_fence; -+ dxgadapter_add_shared_syncobj(adapter, syncobj); -+ kref_get(&adapter->adapter_kref); -+ init_rwsem(&syncobj->syncobj_list_lock); -+ mutex_init(&syncobj->fd_mutex); -+ } -+ return syncobj; -+} -+ -+void dxgsharedsyncobj_release(struct kref *refcount) -+{ -+ struct dxgsharedsyncobject *syncobj; -+ -+ syncobj = container_of(refcount, struct dxgsharedsyncobject, -+ ssyncobj_kref); -+ DXG_TRACE("Destroying shared sync object %p", syncobj); -+ if (syncobj->adapter) { -+ dxgadapter_remove_shared_syncobj(syncobj->adapter, -+ syncobj); -+ kref_put(&syncobj->adapter->adapter_kref, -+ dxgadapter_release); -+ } -+ 
kfree(syncobj); -+} -+ -+void dxgsharedsyncobj_add_syncobj(struct dxgsharedsyncobject *shared, -+ struct dxgsyncobject *syncobj) -+{ -+ DXG_TRACE("Add syncobj 0x%p 0x%p", shared, syncobj); -+ kref_get(&shared->ssyncobj_kref); -+ down_write(&shared->syncobj_list_lock); -+ list_add(&syncobj->shared_syncobj_list_entry, -+ &shared->shared_syncobj_list_head); -+ syncobj->shared_owner = shared; -+ up_write(&shared->syncobj_list_lock); -+} -+ -+void dxgsharedsyncobj_remove_syncobj(struct dxgsharedsyncobject *shared, -+ struct dxgsyncobject *syncobj) -+{ -+ DXG_TRACE("Remove syncobj 0x%p", shared); -+ down_write(&shared->syncobj_list_lock); -+ list_del(&syncobj->shared_syncobj_list_entry); -+ up_write(&shared->syncobj_list_lock); -+} -+ - struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, - struct dxgdevice *device, - struct dxgadapter *adapter, -@@ -1091,7 +1207,70 @@ void dxgsyncobject_release(struct kref *refcount) - struct dxgsyncobject *syncobj; - - syncobj = container_of(refcount, struct dxgsyncobject, syncobj_kref); -+ if (syncobj->shared_owner) { -+ dxgsharedsyncobj_remove_syncobj(syncobj->shared_owner, -+ syncobj); -+ kref_put(&syncobj->shared_owner->ssyncobj_kref, -+ dxgsharedsyncobj_release); -+ } - if (syncobj->host_event) - kfree(syncobj->host_event); - kfree(syncobj); - } -+ -+struct dxghwqueue *dxghwqueue_create(struct dxgcontext *context) -+{ -+ struct dxgprocess *process = context->device->process; -+ struct dxghwqueue *hwqueue = kzalloc(sizeof(*hwqueue), GFP_KERNEL); -+ -+ if (hwqueue) { -+ kref_init(&hwqueue->hwqueue_kref); -+ hwqueue->context = context; -+ hwqueue->process = process; -+ hwqueue->device_handle = context->device->handle; -+ if (dxgcontext_add_hwqueue(context, hwqueue) < 0) { -+ kref_put(&hwqueue->hwqueue_kref, dxghwqueue_release); -+ hwqueue = NULL; -+ } else { -+ kref_get(&context->context_kref); -+ } -+ } -+ return hwqueue; -+} -+ -+void dxghwqueue_destroy(struct dxgprocess *process, struct dxghwqueue *hwqueue) -+{ 
-+ DXG_TRACE("Destroyng hwqueue %p", hwqueue); -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ if (hwqueue->handle.v) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ hwqueue->handle); -+ hwqueue->handle.v = 0; -+ } -+ if (hwqueue->progress_fence_sync_object.v) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_MONITOREDFENCE, -+ hwqueue->progress_fence_sync_object); -+ hwqueue->progress_fence_sync_object.v = 0; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (hwqueue->progress_fence_mapped_address) { -+ dxg_unmap_iospace(hwqueue->progress_fence_mapped_address, -+ PAGE_SIZE); -+ hwqueue->progress_fence_mapped_address = NULL; -+ } -+ dxgcontext_remove_hwqueue_safe(hwqueue->context, hwqueue); -+ -+ kref_put(&hwqueue->context->context_kref, dxgcontext_release); -+ kref_put(&hwqueue->hwqueue_kref, dxghwqueue_release); -+} -+ -+void dxghwqueue_release(struct kref *refcount) -+{ -+ struct dxghwqueue *hwqueue; -+ -+ hwqueue = container_of(refcount, struct dxghwqueue, hwqueue_kref); -+ kfree(hwqueue); -+} -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -40,6 +40,8 @@ struct dxgallocation; - struct dxgresource; - struct dxgsharedresource; - struct dxgsyncobject; -+struct dxgsharedsyncobject; -+struct dxghwqueue; - - /* - * Driver private data. -@@ -137,6 +139,18 @@ struct dxghosteventcpu { - * "device" syncobject, because the belong to a device (dxgdevice). - * Device syncobjects are inserted to a list in dxgdevice. - * -+ * A syncobject can be "shared", meaning that it could be opened by many -+ * processes. -+ * -+ * Shared syncobjects are inserted to a list in its owner -+ * (dxgsharedsyncobject). -+ * A syncobject can be shared by using a global handle or by using -+ * "NT security handle". 
-+ * When global handle sharing is used, the handle is created durinig object -+ * creation. -+ * When "NT security" is used, the handle for sharing is create be calling -+ * dxgk_share_objects. On Linux "NT handle" is represented by a file -+ * descriptor. FD points to dxgsharedsyncobject. - */ - struct dxgsyncobject { - struct kref syncobj_kref; -@@ -146,6 +160,8 @@ struct dxgsyncobject { - * List entry in dxgadapter for other objects - */ - struct list_head syncobj_list_entry; -+ /* List entry in the dxgsharedsyncobject object for shared synobjects */ -+ struct list_head shared_syncobj_list_entry; - /* Adapter, the syncobject belongs to. NULL for stopped sync obejcts. */ - struct dxgadapter *adapter; - /* -@@ -156,6 +172,8 @@ struct dxgsyncobject { - struct dxgprocess *process; - /* Used by D3DDDI_CPU_NOTIFICATION objects */ - struct dxghosteventcpu *host_event; -+ /* Owner object for shared syncobjects */ -+ struct dxgsharedsyncobject *shared_owner; - /* CPU virtual address of the fence value for "device" syncobjects */ - void *mapped_address; - /* Handle in the process handle table */ -@@ -187,6 +205,41 @@ struct dxgvgpuchannel { - struct hv_device *hdev; - }; - -+/* -+ * The object is used as parent of all sync objects, created for a shared -+ * syncobject. When a shared syncobject is created without NT security, the -+ * handle in the global handle table will point to this object. 
-+ */ -+struct dxgsharedsyncobject { -+ struct kref ssyncobj_kref; -+ /* Referenced by file descriptors */ -+ int host_shared_handle_nt_reference; -+ /* Corresponding handle in the host global handle table */ -+ struct d3dkmthandle host_shared_handle; -+ /* -+ * When the sync object is shared by NT handle, this is the -+ * corresponding handle in the host -+ */ -+ struct d3dkmthandle host_shared_handle_nt; -+ /* Protects access to host_shared_handle_nt */ -+ struct mutex fd_mutex; -+ struct rw_semaphore syncobj_list_lock; -+ struct list_head shared_syncobj_list_head; -+ struct list_head adapter_shared_syncobj_list_entry; -+ struct dxgadapter *adapter; -+ enum d3dddi_synchronizationobject_type type; -+ u32 monitored_fence:1; -+}; -+ -+struct dxgsharedsyncobject *dxgsharedsyncobj_create(struct dxgadapter *adapter, -+ struct dxgsyncobject -+ *syncobj); -+void dxgsharedsyncobj_release(struct kref *refcount); -+void dxgsharedsyncobj_add_syncobj(struct dxgsharedsyncobject *sharedsyncobj, -+ struct dxgsyncobject *syncobj); -+void dxgsharedsyncobj_remove_syncobj(struct dxgsharedsyncobject *sharedsyncobj, -+ struct dxgsyncobject *syncobj); -+ - struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, - struct dxgdevice *device, - struct dxgadapter *adapter, -@@ -375,6 +428,8 @@ struct dxgadapter { - struct list_head adapter_process_list_head; - /* List of all dxgsharedresource objects */ - struct list_head shared_resource_list_head; -+ /* List of all dxgsharedsyncobject objects */ -+ struct list_head adapter_shared_syncobj_list_head; - /* List of all non-device dxgsyncobject objects */ - struct list_head syncobj_list_head; - /* This lock protects shared resource and syncobject lists */ -@@ -402,6 +457,10 @@ void dxgadapter_release_lock_shared(struct dxgadapter *adapter); - int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter); - void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter); - void dxgadapter_release_lock_exclusive(struct 
dxgadapter *adapter); -+void dxgadapter_add_shared_syncobj(struct dxgadapter *adapter, -+ struct dxgsharedsyncobject *so); -+void dxgadapter_remove_shared_syncobj(struct dxgadapter *adapter, -+ struct dxgsharedsyncobject *so); - void dxgadapter_add_syncobj(struct dxgadapter *adapter, - struct dxgsyncobject *so); - void dxgadapter_remove_syncobj(struct dxgsyncobject *so); -@@ -487,8 +546,32 @@ struct dxgcontext *dxgcontext_create(struct dxgdevice *dev); - void dxgcontext_destroy(struct dxgprocess *pr, struct dxgcontext *ctx); - void dxgcontext_destroy_safe(struct dxgprocess *pr, struct dxgcontext *ctx); - void dxgcontext_release(struct kref *refcount); -+int dxgcontext_add_hwqueue(struct dxgcontext *ctx, -+ struct dxghwqueue *hq); -+void dxgcontext_remove_hwqueue(struct dxgcontext *ctx, struct dxghwqueue *hq); -+void dxgcontext_remove_hwqueue_safe(struct dxgcontext *ctx, -+ struct dxghwqueue *hq); - bool dxgcontext_is_active(struct dxgcontext *ctx); - -+/* -+ * The object represent the execution hardware queue of a device. -+ */ -+struct dxghwqueue { -+ /* entry in the context hw queue list */ -+ struct list_head hwqueue_list_entry; -+ struct kref hwqueue_kref; -+ struct dxgcontext *context; -+ struct dxgprocess *process; -+ struct d3dkmthandle progress_fence_sync_object; -+ struct d3dkmthandle handle; -+ struct d3dkmthandle device_handle; -+ void *progress_fence_mapped_address; -+}; -+ -+struct dxghwqueue *dxghwqueue_create(struct dxgcontext *ctx); -+void dxghwqueue_destroy(struct dxgprocess *pr, struct dxghwqueue *hq); -+void dxghwqueue_release(struct kref *refcount); -+ - /* - * A shared resource object is created to track the list of dxgresource objects, - * which are opened for the same underlying shared resource. 
-@@ -720,9 +803,22 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - d3dkmt_waitforsynchronizationobjectfromcpu - *args, - u64 cpu_event); -+int dxgvmb_send_create_hwqueue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_createhwqueue *args, -+ struct d3dkmt_createhwqueue *__user inargs, -+ struct dxghwqueue *hq); -+int dxgvmb_send_destroy_hwqueue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle handle); - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -+int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, -+ struct dxgvmbuschannel *channel, -+ struct d3dkmt_opensyncobjectfromnthandle2 -+ *args, -+ struct dxgsyncobject *syncobj); - int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process, - struct d3dkmthandle object, - struct d3dkmthandle *shared_handle); -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -259,6 +259,7 @@ int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, - - INIT_LIST_HEAD(&adapter->adapter_process_list_head); - INIT_LIST_HEAD(&adapter->shared_resource_list_head); -+ INIT_LIST_HEAD(&adapter->adapter_shared_syncobj_list_head); - INIT_LIST_HEAD(&adapter->syncobj_list_head); - init_rwsem(&adapter->shared_resource_list_lock); - adapter->pci_dev = dev; -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -277,6 +277,10 @@ struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process, - device_handle = - ((struct dxgcontext *)obj)->device_handle; - break; -+ case HMGRENTRY_TYPE_DXGHWQUEUE: -+ device_handle = -+ ((struct dxghwqueue 
*)obj)->device_handle; -+ break; - default: - DXG_ERR("invalid handle type: %d", t); - break; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -712,6 +712,69 @@ int dxgvmb_send_destroy_process(struct d3dkmthandle process) - return ret; - } - -+int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, -+ struct dxgvmbuschannel *channel, -+ struct d3dkmt_opensyncobjectfromnthandle2 -+ *args, -+ struct dxgsyncobject *syncobj) -+{ -+ struct dxgkvmb_command_opensyncobject *command; -+ struct dxgkvmb_command_opensyncobject_return result = { }; -+ int ret; -+ struct dxgvmbusmsg msg; -+ -+ ret = init_message(&msg, NULL, process, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vm_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_OPENSYNCOBJECT, -+ process->host_handle); -+ command->device = args->device; -+ command->global_sync_object = syncobj->shared_owner->host_shared_handle; -+ command->flags = args->flags; -+ if (syncobj->monitored_fence) -+ command->engine_affinity = -+ args->monitored_fence.engine_affinity; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = dxgvmb_send_sync_msg(channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ -+ dxgglobal_release_channel_lock(); -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result.status); -+ if (ret < 0) -+ goto cleanup; -+ -+ args->sync_object = result.sync_object; -+ if (syncobj->monitored_fence) { -+ void *va = dxg_map_iospace(result.guest_cpu_physical_address, -+ PAGE_SIZE, PROT_READ | PROT_WRITE, -+ true); -+ if (va == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ args->monitored_fence.fence_value_cpu_va = va; -+ args->monitored_fence.fence_value_gpu_va = -+ result.gpu_virtual_address; -+ syncobj->mapped_address = va; -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ 
if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process, - struct d3dkmthandle object, - struct d3dkmthandle *shared_handle) -@@ -2050,6 +2113,164 @@ int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_create_hwqueue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_createhwqueue *args, -+ struct d3dkmt_createhwqueue *__user inargs, -+ struct dxghwqueue *hwqueue) -+{ -+ struct dxgkvmb_command_createhwqueue *command = NULL; -+ u32 cmd_size = sizeof(struct dxgkvmb_command_createhwqueue); -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ if (args->priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("invalid private driver data size: %d", -+ args->priv_drv_data_size); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args->priv_drv_data_size) -+ cmd_size += args->priv_drv_data_size - 1; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CREATEHWQUEUE, -+ process->host_handle); -+ command->context = args->context; -+ command->flags = args->flags; -+ command->priv_drv_data_size = args->priv_drv_data_size; -+ if (args->priv_drv_data_size) { -+ ret = copy_from_user(command->priv_drv_data, -+ args->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy private data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ command, cmd_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(command->status); -+ if (ret < 0) { -+ DXG_ERR("dxgvmb_send_sync_msg failed: %x", -+ command->status.v); -+ goto cleanup; -+ } -+ -+ ret = hmgrtable_assign_handle_safe(&process->handle_table, hwqueue, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ command->hwqueue); -+ if (ret < 0) -+ goto cleanup; -+ -+ 
ret = hmgrtable_assign_handle_safe(&process->handle_table, -+ NULL, -+ HMGRENTRY_TYPE_MONITOREDFENCE, -+ command->hwqueue_progress_fence); -+ if (ret < 0) -+ goto cleanup; -+ -+ hwqueue->handle = command->hwqueue; -+ hwqueue->progress_fence_sync_object = command->hwqueue_progress_fence; -+ -+ hwqueue->progress_fence_mapped_address = -+ dxg_map_iospace((u64)command->hwqueue_progress_fence_cpuva, -+ PAGE_SIZE, PROT_READ | PROT_WRITE, true); -+ if (hwqueue->progress_fence_mapped_address == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ ret = copy_to_user(&inargs->queue, &command->hwqueue, -+ sizeof(struct d3dkmthandle)); -+ if (ret < 0) { -+ DXG_ERR("failed to copy hwqueue handle"); -+ goto cleanup; -+ } -+ ret = copy_to_user(&inargs->queue_progress_fence, -+ &command->hwqueue_progress_fence, -+ sizeof(struct d3dkmthandle)); -+ if (ret < 0) { -+ DXG_ERR("failed to progress fence"); -+ goto cleanup; -+ } -+ ret = copy_to_user(&inargs->queue_progress_fence_cpu_va, -+ &hwqueue->progress_fence_mapped_address, -+ sizeof(inargs->queue_progress_fence_cpu_va)); -+ if (ret < 0) { -+ DXG_ERR("failed to copy fence cpu va"); -+ goto cleanup; -+ } -+ ret = copy_to_user(&inargs->queue_progress_fence_gpu_va, -+ &command->hwqueue_progress_fence_gpuva, -+ sizeof(u64)); -+ if (ret < 0) { -+ DXG_ERR("failed to copy fence gpu va"); -+ goto cleanup; -+ } -+ if (args->priv_drv_data_size) { -+ ret = copy_to_user(args->priv_drv_data, -+ command->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret < 0) -+ DXG_ERR("failed to copy private data"); -+ } -+ -+cleanup: -+ if (ret < 0) { -+ DXG_ERR("failed %x", ret); -+ if (hwqueue->handle.v) { -+ hmgrtable_free_handle_safe(&process->handle_table, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ hwqueue->handle); -+ hwqueue->handle.v = 0; -+ } -+ if (command && command->hwqueue.v) -+ dxgvmb_send_destroy_hwqueue(process, adapter, -+ command->hwqueue); -+ } -+ free_message(&msg, process); -+ return ret; -+} -+ -+int dxgvmb_send_destroy_hwqueue(struct 
dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle handle) -+{ -+ int ret; -+ struct dxgkvmb_command_destroyhwqueue *command; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_DESTROYHWQUEUE, -+ process->host_handle); -+ command->hwqueue = handle; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -172,6 +172,21 @@ struct dxgkvmb_command_signalguestevent { - bool dereference_event; - }; - -+struct dxgkvmb_command_opensyncobject { -+ struct dxgkvmb_command_vm_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle global_sync_object; -+ u32 engine_affinity; -+ struct d3dddi_synchronizationobject_flags flags; -+}; -+ -+struct dxgkvmb_command_opensyncobject_return { -+ struct d3dkmthandle sync_object; -+ struct ntstatus status; -+ u64 gpu_virtual_address; -+ u64 guest_cpu_physical_address; -+}; -+ - /* - * The command returns struct d3dkmthandle of a shared object for the - * given pre-process object -@@ -508,4 +523,24 @@ struct dxgkvmb_command_waitforsyncobjectfromgpu { - /* struct d3dkmthandle ObjectHandles[object_count] */ - }; - -+/* Returns the same structure */ -+struct dxgkvmb_command_createhwqueue { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct ntstatus status; -+ struct d3dkmthandle hwqueue; -+ struct d3dkmthandle hwqueue_progress_fence; -+ void *hwqueue_progress_fence_cpuva; -+ 
u64 hwqueue_progress_fence_gpuva; -+ struct d3dkmthandle context; -+ struct d3dddi_createhwqueueflags flags; -+ u32 priv_drv_data_size; -+ char priv_drv_data[1]; -+}; -+ -+/* The command returns ntstatus */ -+struct dxgkvmb_command_destroyhwqueue { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle hwqueue; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -36,6 +36,33 @@ static char *errorstr(int ret) - } - #endif - -+static int dxgsyncobj_release(struct inode *inode, struct file *file) -+{ -+ struct dxgsharedsyncobject *syncobj = file->private_data; -+ -+ DXG_TRACE("Release syncobj: %p", syncobj); -+ mutex_lock(&syncobj->fd_mutex); -+ kref_get(&syncobj->ssyncobj_kref); -+ syncobj->host_shared_handle_nt_reference--; -+ if (syncobj->host_shared_handle_nt_reference == 0) { -+ if (syncobj->host_shared_handle_nt.v) { -+ dxgvmb_send_destroy_nt_shared_object( -+ syncobj->host_shared_handle_nt); -+ DXG_TRACE("Syncobj host_handle_nt destroyed: %x", -+ syncobj->host_shared_handle_nt.v); -+ syncobj->host_shared_handle_nt.v = 0; -+ } -+ kref_put(&syncobj->ssyncobj_kref, dxgsharedsyncobj_release); -+ } -+ mutex_unlock(&syncobj->fd_mutex); -+ kref_put(&syncobj->ssyncobj_kref, dxgsharedsyncobj_release); -+ return 0; -+} -+ -+static const struct file_operations dxg_syncobj_fops = { -+ .release = dxgsyncobj_release, -+}; -+ - static int dxgsharedresource_release(struct inode *inode, struct file *file) - { - struct dxgsharedresource *resource = file->private_data; -@@ -833,6 +860,156 @@ dxgkio_destroy_context(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_create_hwqueue(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_createhwqueue args; -+ struct dxgdevice *device = NULL; -+ struct dxgcontext *context = NULL; -+ struct dxgadapter 
*adapter = NULL; -+ struct dxghwqueue *hwqueue = NULL; -+ int ret; -+ bool device_lock_acquired = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) -+ goto cleanup; -+ -+ device_lock_acquired = true; -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -+ context = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED); -+ -+ if (context == NULL) { -+ DXG_ERR("Invalid context handle %x", args.context.v); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hwqueue = dxghwqueue_create(context); -+ if (hwqueue == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_create_hwqueue(process, adapter, &args, -+ inargs, hwqueue); -+ -+cleanup: -+ -+ if (ret < 0 && hwqueue) -+ dxghwqueue_destroy(process, hwqueue); -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int dxgkio_destroy_hwqueue(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_destroyhwqueue args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxghwqueue 
*hwqueue = NULL; -+ struct d3dkmthandle device_handle = {}; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ hwqueue = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ args.queue); -+ if (hwqueue) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGHWQUEUE, args.queue); -+ hwqueue->handle.v = 0; -+ device_handle = hwqueue->device_handle; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (hwqueue == NULL) { -+ DXG_ERR("invalid hwqueue handle: %x", args.queue.v); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, device_handle); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_destroy_hwqueue(process, adapter, args.queue); -+ -+ dxghwqueue_destroy(process, hwqueue); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - get_standard_alloc_priv_data(struct dxgdevice *device, - struct d3dkmt_createstandardallocation *alloc_info, -@@ -1548,6 +1725,164 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_submit_signal_to_hwqueue(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_submitsignalsyncobjectstohwqueue args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ 
struct d3dkmthandle hwqueue = {}; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.hwqueue_count > D3DDDI_MAX_BROADCAST_CONTEXT || -+ args.hwqueue_count == 0) { -+ DXG_ERR("invalid hwqueue count: %d", -+ args.hwqueue_count); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.object_count > D3DDDI_MAX_OBJECT_SIGNALED || -+ args.object_count == 0) { -+ DXG_ERR("invalid number of syncobjects: %d", -+ args.object_count); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = copy_from_user(&hwqueue, args.hwqueues, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy hwqueue handle"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ hwqueue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_signal_sync_object(process, adapter, -+ args.flags, 0, zerohandle, -+ args.object_count, args.objects, -+ args.hwqueue_count, args.hwqueues, -+ args.object_count, -+ args.fence_values, NULL, -+ zerohandle); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_submitwaitforsyncobjectstohwqueue args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int ret; -+ struct d3dkmthandle *objects = NULL; -+ u32 object_size; -+ u64 *fences = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; 
-+ } -+ -+ if (args.object_count > D3DDDI_MAX_OBJECT_WAITED_ON || -+ args.object_count == 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ object_size = sizeof(struct d3dkmthandle) * args.object_count; -+ objects = vzalloc(object_size); -+ if (objects == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(objects, args.objects, object_size); -+ if (ret) { -+ DXG_ERR("failed to copy objects"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ object_size = sizeof(u64) * args.object_count; -+ fences = vzalloc(object_size); -+ if (fences == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(fences, args.fence_values, object_size); -+ if (ret) { -+ DXG_ERR("failed to copy fence values"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ args.hwqueue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_wait_sync_object_gpu(process, adapter, -+ args.hwqueue, args.object_count, -+ objects, fences, false); -+ -+cleanup: -+ -+ if (objects) -+ vfree(objects); -+ if (fences) -+ vfree(fences); -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - { -@@ -1558,6 +1893,7 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - struct eventfd_ctx *event = NULL; - struct dxgsyncobject *syncobj = NULL; - bool device_lock_acquired = false; -+ struct dxgsharedsyncobject *syncobjgbl = NULL; - struct dxghosteventcpu *host_event = NULL; - - ret = copy_from_user(&args, inargs, sizeof(args)); -@@ -1618,6 +1954,22 @@ 
dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - if (ret < 0) - goto cleanup; - -+ if (args.info.flags.shared) { -+ if (args.info.shared_handle.v == 0) { -+ DXG_ERR("shared handle should not be 0"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ syncobjgbl = dxgsharedsyncobj_create(device->adapter, syncobj); -+ if (syncobjgbl == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ dxgsharedsyncobj_add_syncobj(syncobjgbl, syncobj); -+ -+ syncobjgbl->host_shared_handle = args.info.shared_handle; -+ } -+ - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy output args"); -@@ -1646,6 +1998,8 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - if (event) - eventfd_ctx_put(event); - } -+ if (syncobjgbl) -+ kref_put(&syncobjgbl->ssyncobj_kref, dxgsharedsyncobj_release); - if (adapter) - dxgadapter_release_lock_shared(adapter); - if (device_lock_acquired) -@@ -1700,6 +2054,140 @@ dxgkio_destroy_sync_object(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_open_sync_object_nt(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_opensyncobjectfromnthandle2 args; -+ struct dxgsyncobject *syncobj = NULL; -+ struct dxgsharedsyncobject *syncobj_fd = NULL; -+ struct file *file = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dddi_synchronizationobject_flags flags = { }; -+ int ret; -+ bool device_lock_acquired = false; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ args.sync_object.v = 0; -+ -+ if (args.device.v) { -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ return -EINVAL; -+ goto cleanup; -+ } -+ } else { -+ DXG_ERR("device handle is missing"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret 
= dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) -+ goto cleanup; -+ -+ device_lock_acquired = true; -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ file = fget(args.nt_handle); -+ if (!file) { -+ DXG_ERR("failed to get file from handle: %llx", -+ args.nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (file->f_op != &dxg_syncobj_fops) { -+ DXG_ERR("invalid fd: %llx", args.nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ syncobj_fd = file->private_data; -+ if (syncobj_fd == NULL) { -+ DXG_ERR("invalid private data: %llx", args.nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ flags.shared = 1; -+ flags.nt_security_sharing = 1; -+ syncobj = dxgsyncobject_create(process, device, adapter, -+ syncobj_fd->type, flags); -+ if (syncobj == NULL) { -+ DXG_ERR("failed to create sync object"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ dxgsharedsyncobj_add_syncobj(syncobj_fd, syncobj); -+ -+ ret = dxgvmb_send_open_sync_object_nt(process, &dxgglobal->channel, -+ &args, syncobj); -+ if (ret < 0) { -+ DXG_ERR("failed to open sync object on host: %x", -+ syncobj_fd->host_shared_handle.v); -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, syncobj, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ args.sync_object); -+ if (ret >= 0) { -+ syncobj->handle = args.sync_object; -+ kref_get(&syncobj->syncobj_kref); -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret == 0) -+ goto success; -+ DXG_ERR("failed to copy output args"); -+ -+cleanup: -+ -+ if (syncobj) { -+ dxgsyncobject_destroy(process, syncobj); -+ syncobj = NULL; -+ } -+ -+ if (args.sync_object.v) -+ dxgvmb_send_destroy_sync_object(process, args.sync_object); -+ -+success: -+ -+ if (file) -+ fput(file); 
-+ if (syncobj) -+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release); -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_signal_sync_object(struct dxgprocess *process, void *__user inargs) - { -@@ -2353,6 +2841,30 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgsharedsyncobj_get_host_nt_handle(struct dxgsharedsyncobject *syncobj, -+ struct dxgprocess *process, -+ struct d3dkmthandle objecthandle) -+{ -+ int ret = 0; -+ -+ mutex_lock(&syncobj->fd_mutex); -+ if (syncobj->host_shared_handle_nt_reference == 0) { -+ ret = dxgvmb_send_create_nt_shared_object(process, -+ objecthandle, -+ &syncobj->host_shared_handle_nt); -+ if (ret < 0) -+ goto cleanup; -+ DXG_TRACE("Host_shared_handle_ht: %x", -+ syncobj->host_shared_handle_nt.v); -+ kref_get(&syncobj->ssyncobj_kref); -+ } -+ syncobj->host_shared_handle_nt_reference++; -+cleanup: -+ mutex_unlock(&syncobj->fd_mutex); -+ return ret; -+} -+ - static int - dxgsharedresource_get_host_nt_handle(struct dxgsharedresource *resource, - struct dxgprocess *process, -@@ -2378,6 +2890,7 @@ dxgsharedresource_get_host_nt_handle(struct dxgsharedresource *resource, - } - - enum dxg_sharedobject_type { -+ DXG_SHARED_SYNCOBJECT, - DXG_SHARED_RESOURCE - }; - -@@ -2394,6 +2907,10 @@ static int get_object_fd(enum dxg_sharedobject_type type, - } - - switch (type) { -+ case DXG_SHARED_SYNCOBJECT: -+ file = anon_inode_getfile("dxgsyncobj", -+ &dxg_syncobj_fops, object, 0); -+ break; - case DXG_SHARED_RESOURCE: - file = anon_inode_getfile("dxgresource", - &dxg_resource_fops, object, 0); -@@ -2419,6 +2936,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - enum hmgrentry_type object_type; - struct 
dxgsyncobject *syncobj = NULL; - struct dxgresource *resource = NULL; -+ struct dxgsharedsyncobject *shared_syncobj = NULL; - struct dxgsharedresource *shared_resource = NULL; - struct d3dkmthandle *handles = NULL; - int object_fd = -1; -@@ -2465,6 +2983,17 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - ret = -EINVAL; - } else { - switch (object_type) { -+ case HMGRENTRY_TYPE_DXGSYNCOBJECT: -+ syncobj = obj; -+ if (syncobj->shared) { -+ kref_get(&syncobj->syncobj_kref); -+ shared_syncobj = syncobj->shared_owner; -+ } else { -+ DXG_ERR("sync object is not shared"); -+ syncobj = NULL; -+ ret = -EINVAL; -+ } -+ break; - case HMGRENTRY_TYPE_DXGRESOURCE: - resource = obj; - if (resource->shared_owner) { -@@ -2488,6 +3017,21 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - goto cleanup; - - switch (object_type) { -+ case HMGRENTRY_TYPE_DXGSYNCOBJECT: -+ ret = get_object_fd(DXG_SHARED_SYNCOBJECT, shared_syncobj, -+ &object_fd); -+ if (ret < 0) { -+ DXG_ERR("get_object_fd failed for sync object"); -+ goto cleanup; -+ } -+ ret = dxgsharedsyncobj_get_host_nt_handle(shared_syncobj, -+ process, -+ handles[0]); -+ if (ret < 0) { -+ DXG_ERR("get_host_nt_handle failed"); -+ goto cleanup; -+ } -+ break; - case HMGRENTRY_TYPE_DXGRESOURCE: - ret = get_object_fd(DXG_SHARED_RESOURCE, shared_resource, - &object_fd); -@@ -2954,10 +3498,10 @@ static struct ioctl_desc ioctls[] = { - /* 0x15 */ {dxgkio_close_adapter, LX_DXCLOSEADAPTER}, - /* 0x16 */ {}, - /* 0x17 */ {}, --/* 0x18 */ {}, -+/* 0x18 */ {dxgkio_create_hwqueue, LX_DXCREATEHWQUEUE}, - /* 0x19 */ {dxgkio_destroy_device, LX_DXDESTROYDEVICE}, - /* 0x1a */ {}, --/* 0x1b */ {}, -+/* 0x1b */ {dxgkio_destroy_hwqueue, LX_DXDESTROYHWQUEUE}, - /* 0x1c */ {}, - /* 0x1d */ {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT}, - /* 0x1e */ {}, -@@ -2986,8 +3530,10 @@ static struct ioctl_desc ioctls[] = { - /* 0x33 */ {dxgkio_signal_sync_object_gpu2, - 
LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2}, - /* 0x34 */ {}, --/* 0x35 */ {}, --/* 0x36 */ {}, -+/* 0x35 */ {dxgkio_submit_signal_to_hwqueue, -+ LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE}, -+/* 0x36 */ {dxgkio_submit_wait_to_hwqueue, -+ LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE}, - /* 0x37 */ {}, - /* 0x38 */ {}, - /* 0x39 */ {}, -@@ -2999,7 +3545,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x3d */ {}, - /* 0x3e */ {dxgkio_enum_adapters3, LX_DXENUMADAPTERS3}, - /* 0x3f */ {dxgkio_share_objects, LX_DXSHAREOBJECTS}, --/* 0x40 */ {}, -+/* 0x40 */ {dxgkio_open_sync_object_nt, LX_DXOPENSYNCOBJECTFROMNTHANDLE2}, - /* 0x41 */ {dxgkio_query_resource_info_nt, - LX_DXQUERYRESOURCEINFOFROMNTHANDLE}, - /* 0x42 */ {dxgkio_open_resource_nt, LX_DXOPENRESOURCEFROMNTHANDLE}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -201,6 +201,16 @@ struct d3dkmt_createcontextvirtual { - struct d3dkmthandle context; - }; - -+struct d3dddi_createhwqueueflags { -+ union { -+ struct { -+ __u32 disable_gpu_timeout:1; -+ __u32 reserved:31; -+ }; -+ __u32 value; -+ }; -+}; -+ - enum d3dkmdt_gdisurfacetype { - _D3DKMDT_GDISURFACE_INVALID = 0, - _D3DKMDT_GDISURFACE_TEXTURE = 1, -@@ -694,6 +704,81 @@ struct d3dddi_openallocationinfo2 { - __u64 reserved[6]; - }; - -+struct d3dkmt_createhwqueue { -+ struct d3dkmthandle context; -+ struct d3dddi_createhwqueueflags flags; -+ __u32 priv_drv_data_size; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ struct d3dkmthandle queue; -+ struct d3dkmthandle queue_progress_fence; -+#ifdef __KERNEL__ -+ void *queue_progress_fence_cpu_va; -+#else -+ __u64 queue_progress_fence_cpu_va; -+#endif -+ __u64 queue_progress_fence_gpu_va; -+}; -+ -+struct d3dkmt_destroyhwqueue { -+ struct d3dkmthandle queue; -+}; -+ -+struct d3dkmt_submitwaitforsyncobjectstohwqueue { -+ struct 
d3dkmthandle hwqueue; -+ __u32 object_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+ __u64 *fence_values; -+#else -+ __u64 objects; -+ __u64 fence_values; -+#endif -+}; -+ -+struct d3dkmt_submitsignalsyncobjectstohwqueue { -+ struct d3dddicb_signalflags flags; -+ __u32 hwqueue_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *hwqueues; -+#else -+ __u64 hwqueues; -+#endif -+ __u32 object_count; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+ __u64 *fence_values; -+#else -+ __u64 objects; -+ __u64 fence_values; -+#endif -+}; -+ -+struct d3dkmt_opensyncobjectfromnthandle2 { -+ __u64 nt_handle; -+ struct d3dkmthandle device; -+ struct d3dddi_synchronizationobject_flags flags; -+ struct d3dkmthandle sync_object; -+ __u32 reserved1; -+ union { -+ struct { -+#ifdef __KERNEL__ -+ void *fence_value_cpu_va; -+#else -+ __u64 fence_value_cpu_va; -+#endif -+ __u64 fence_value_gpu_va; -+ __u32 engine_affinity; -+ } monitored_fence; -+ __u64 reserved[8]; -+ }; -+}; -+ - struct d3dkmt_openresourcefromnthandle { - struct d3dkmthandle device; - __u32 reserved; -@@ -819,6 +904,10 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x14, struct d3dkmt_enumadapters2) - #define LX_DXCLOSEADAPTER \ - _IOWR(0x47, 0x15, struct d3dkmt_closeadapter) -+#define LX_DXCREATEHWQUEUE \ -+ _IOWR(0x47, 0x18, struct d3dkmt_createhwqueue) -+#define LX_DXDESTROYHWQUEUE \ -+ _IOWR(0x47, 0x1b, struct d3dkmt_destroyhwqueue) - #define LX_DXDESTROYDEVICE \ - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ -@@ -829,6 +918,10 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x32, struct d3dkmt_signalsynchronizationobjectfromgpu) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2 \ - _IOWR(0x47, 0x33, struct d3dkmt_signalsynchronizationobjectfromgpu2) -+#define LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE \ -+ _IOWR(0x47, 0x35, struct d3dkmt_submitsignalsyncobjectstohwqueue) -+#define LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE \ -+ 
_IOWR(0x47, 0x36, struct d3dkmt_submitwaitforsyncobjectstohwqueue) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x3a, struct d3dkmt_waitforsynchronizationobjectfromcpu) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1681-drivers-hv-dxgkrnl-Creation-of-paging-queue-objects.patch b/patch/kernel/archive/wsl2-arm64-6.1/1681-drivers-hv-dxgkrnl-Creation-of-paging-queue-objects.patch deleted file mode 100644 index 6043c2319241..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1681-drivers-hv-dxgkrnl-Creation-of-paging-queue-objects.patch +++ /dev/null @@ -1,640 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Thu, 20 Jan 2022 15:15:18 -0800 -Subject: drivers: hv: dxgkrnl: Creation of paging queue objects. - -Implement ioctls for creation/destruction of the paging queue objects: - - LX_DXCREATEPAGINGQUEUE, - - LX_DXDESTROYPAGINGQUEUE - -Paging queue objects (dxgpagingqueue) contain operations, which -handle residency of device accessible allocations. An allocation is -resident, when the device has access to it. For example, the allocation -resides in local device memory or device page tables point to system -memory which is made non-pageable. - -Each paging queue has an associated monitored fence sync object, which -is used to detect when a paging operation is completed. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 89 +++++ - drivers/hv/dxgkrnl/dxgkrnl.h | 24 ++ - drivers/hv/dxgkrnl/dxgprocess.c | 4 + - drivers/hv/dxgkrnl/dxgvmbus.c | 74 ++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 17 + - drivers/hv/dxgkrnl/ioctl.c | 189 +++++++++- - include/uapi/misc/d3dkmthk.h | 27 ++ - 7 files changed, 418 insertions(+), 6 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -278,6 +278,7 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - void dxgdevice_stop(struct dxgdevice *device) - { - struct dxgallocation *alloc; -+ struct dxgpagingqueue *pqueue; - struct dxgsyncobject *syncobj; - - DXG_TRACE("Stopping device: %p", device); -@@ -288,6 +289,10 @@ void dxgdevice_stop(struct dxgdevice *device) - dxgdevice_release_alloc_list_lock(device); - - hmgrtable_lock(&device->process->handle_table, DXGLOCK_EXCL); -+ list_for_each_entry(pqueue, &device->pqueue_list_head, -+ pqueue_list_entry) { -+ dxgpagingqueue_stop(pqueue); -+ } - list_for_each_entry(syncobj, &device->syncobj_list_head, - syncobj_list_entry) { - dxgsyncobject_stop(syncobj); -@@ -375,6 +380,17 @@ void dxgdevice_destroy(struct dxgdevice *device) - dxgdevice_release_context_list_lock(device); - } - -+ { -+ struct dxgpagingqueue *tmp; -+ struct dxgpagingqueue *pqueue; -+ -+ DXG_TRACE("destroying paging queues"); -+ list_for_each_entry_safe(pqueue, tmp, &device->pqueue_list_head, -+ pqueue_list_entry) { -+ dxgpagingqueue_destroy(pqueue); -+ } -+ } -+ - /* Guest handles need to be released before the host handles */ - hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); - if (device->handle_valid) { -@@ -708,6 +724,26 @@ void dxgdevice_release(struct kref *refcount) - kfree(device); - } - -+void dxgdevice_add_paging_queue(struct 
dxgdevice *device, -+ struct dxgpagingqueue *entry) -+{ -+ dxgdevice_acquire_alloc_list_lock(device); -+ list_add_tail(&entry->pqueue_list_entry, &device->pqueue_list_head); -+ dxgdevice_release_alloc_list_lock(device); -+} -+ -+void dxgdevice_remove_paging_queue(struct dxgpagingqueue *pqueue) -+{ -+ struct dxgdevice *device = pqueue->device; -+ -+ dxgdevice_acquire_alloc_list_lock(device); -+ if (pqueue->pqueue_list_entry.next) { -+ list_del(&pqueue->pqueue_list_entry); -+ pqueue->pqueue_list_entry.next = NULL; -+ } -+ dxgdevice_release_alloc_list_lock(device); -+} -+ - void dxgdevice_add_syncobj(struct dxgdevice *device, - struct dxgsyncobject *syncobj) - { -@@ -899,6 +935,59 @@ else - kfree(alloc); - } - -+struct dxgpagingqueue *dxgpagingqueue_create(struct dxgdevice *device) -+{ -+ struct dxgpagingqueue *pqueue; -+ -+ pqueue = kzalloc(sizeof(*pqueue), GFP_KERNEL); -+ if (pqueue) { -+ pqueue->device = device; -+ pqueue->process = device->process; -+ pqueue->device_handle = device->handle; -+ dxgdevice_add_paging_queue(device, pqueue); -+ } -+ return pqueue; -+} -+ -+void dxgpagingqueue_stop(struct dxgpagingqueue *pqueue) -+{ -+ int ret; -+ -+ if (pqueue->mapped_address) { -+ ret = dxg_unmap_iospace(pqueue->mapped_address, PAGE_SIZE); -+ DXG_TRACE("fence is unmapped %d %p", -+ ret, pqueue->mapped_address); -+ pqueue->mapped_address = NULL; -+ } -+} -+ -+void dxgpagingqueue_destroy(struct dxgpagingqueue *pqueue) -+{ -+ struct dxgprocess *process = pqueue->process; -+ -+ DXG_TRACE("Destroying pqueue %p %x", pqueue, pqueue->handle.v); -+ -+ dxgpagingqueue_stop(pqueue); -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ if (pqueue->handle.v) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ pqueue->handle); -+ pqueue->handle.v = 0; -+ } -+ if (pqueue->syncobj_handle.v) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_MONITOREDFENCE, -+ pqueue->syncobj_handle); -+ pqueue->syncobj_handle.v = 0; -+ } 
-+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ if (pqueue->device) -+ dxgdevice_remove_paging_queue(pqueue); -+ kfree(pqueue); -+} -+ - struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - struct dxgadapter *adapter) - { -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -104,6 +104,16 @@ int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev); - void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch); - void dxgvmbuschannel_receive(void *ctx); - -+struct dxgpagingqueue { -+ struct dxgdevice *device; -+ struct dxgprocess *process; -+ struct list_head pqueue_list_entry; -+ struct d3dkmthandle device_handle; -+ struct d3dkmthandle handle; -+ struct d3dkmthandle syncobj_handle; -+ void *mapped_address; -+}; -+ - /* - * The structure describes an event, which will be signaled by - * a message from host. -@@ -127,6 +137,10 @@ struct dxghosteventcpu { - bool remove_from_list; - }; - -+struct dxgpagingqueue *dxgpagingqueue_create(struct dxgdevice *device); -+void dxgpagingqueue_destroy(struct dxgpagingqueue *pqueue); -+void dxgpagingqueue_stop(struct dxgpagingqueue *pqueue); -+ - /* - * This is GPU synchronization object, which is used to synchronize execution - * between GPU contextx/hardware queues or for tracking GPU execution progress. 
-@@ -516,6 +530,9 @@ void dxgdevice_remove_alloc_safe(struct dxgdevice *dev, - struct dxgallocation *a); - void dxgdevice_add_resource(struct dxgdevice *dev, struct dxgresource *res); - void dxgdevice_remove_resource(struct dxgdevice *dev, struct dxgresource *res); -+void dxgdevice_add_paging_queue(struct dxgdevice *dev, -+ struct dxgpagingqueue *pqueue); -+void dxgdevice_remove_paging_queue(struct dxgpagingqueue *pqueue); - void dxgdevice_add_syncobj(struct dxgdevice *dev, struct dxgsyncobject *so); - void dxgdevice_remove_syncobj(struct dxgsyncobject *so); - bool dxgdevice_is_active(struct dxgdevice *dev); -@@ -762,6 +779,13 @@ dxgvmb_send_create_context(struct dxgadapter *adapter, - int dxgvmb_send_destroy_context(struct dxgadapter *adapter, - struct dxgprocess *process, - struct d3dkmthandle h); -+int dxgvmb_send_create_paging_queue(struct dxgprocess *pr, -+ struct dxgdevice *dev, -+ struct d3dkmt_createpagingqueue *args, -+ struct dxgpagingqueue *pq); -+int dxgvmb_send_destroy_paging_queue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle h); - int dxgvmb_send_create_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - struct d3dkmt_createallocation *args, - struct d3dkmt_createallocation *__user inargs, -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -277,6 +277,10 @@ struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process, - device_handle = - ((struct dxgcontext *)obj)->device_handle; - break; -+ case HMGRENTRY_TYPE_DXGPAGINGQUEUE: -+ device_handle = -+ ((struct dxgpagingqueue *)obj)->device_handle; -+ break; - case HMGRENTRY_TYPE_DXGHWQUEUE: - device_handle = - ((struct dxghwqueue *)obj)->device_handle; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- 
a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1155,6 +1155,80 @@ int dxgvmb_send_destroy_context(struct dxgadapter *adapter, - return ret; - } - -+int dxgvmb_send_create_paging_queue(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct d3dkmt_createpagingqueue *args, -+ struct dxgpagingqueue *pqueue) -+{ -+ struct dxgkvmb_command_createpagingqueue_return result; -+ struct dxgkvmb_command_createpagingqueue *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, device->adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CREATEPAGINGQUEUE, -+ process->host_handle); -+ command->args = *args; -+ args->paging_queue.v = 0; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result, -+ sizeof(result)); -+ if (ret < 0) { -+ DXG_ERR("send_create_paging_queue failed %x", ret); -+ goto cleanup; -+ } -+ -+ args->paging_queue = result.paging_queue; -+ args->sync_object = result.sync_object; -+ args->fence_cpu_virtual_address = -+ dxg_map_iospace(result.fence_storage_physical_address, PAGE_SIZE, -+ PROT_READ | PROT_WRITE, true); -+ if (args->fence_cpu_virtual_address == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ pqueue->mapped_address = args->fence_cpu_virtual_address; -+ pqueue->handle = args->paging_queue; -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_destroy_paging_queue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle h) -+{ -+ int ret; -+ struct dxgkvmb_command_destroypagingqueue *command; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_DESTROYPAGINGQUEUE, -+ 
process->host_handle); -+ command->paging_queue = h; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, NULL, 0); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - static int - copy_private_data(struct d3dkmt_createallocation *args, - struct dxgkvmb_command_createallocation *command, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -462,6 +462,23 @@ struct dxgkvmb_command_destroycontext { - struct d3dkmthandle context; - }; - -+struct dxgkvmb_command_createpagingqueue { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_createpagingqueue args; -+}; -+ -+struct dxgkvmb_command_createpagingqueue_return { -+ struct d3dkmthandle paging_queue; -+ struct d3dkmthandle sync_object; -+ u64 fence_storage_physical_address; -+ u64 fence_storage_offset; -+}; -+ -+struct dxgkvmb_command_destroypagingqueue { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle paging_queue; -+}; -+ - struct dxgkvmb_command_createsyncobject { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_createsynchronizationobject2 args; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -329,7 +329,7 @@ static int dxgsharedresource_seal(struct dxgsharedresource *shared_resource) - - if (alloc_data_size) { - if (data_size < alloc_data_size) { -- dev_err(DXGDEV, -+ DXG_ERR( - "Invalid private data size"); - ret = -EINVAL; - goto cleanup1; -@@ -1010,6 +1010,183 @@ static int dxgkio_destroy_hwqueue(struct dxgprocess *process, - return ret; - } - -+static int -+dxgkio_create_paging_queue(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_createpagingqueue args; -+ struct dxgdevice *device = NULL; -+ struct 
dxgadapter *adapter = NULL; -+ struct dxgpagingqueue *pqueue = NULL; -+ int ret; -+ struct d3dkmthandle host_handle = {}; -+ bool device_lock_acquired = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) -+ goto cleanup; -+ -+ device_lock_acquired = true; -+ adapter = device->adapter; -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ pqueue = dxgpagingqueue_create(device); -+ if (pqueue == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_create_paging_queue(process, device, &args, pqueue); -+ if (ret >= 0) { -+ host_handle = args.paging_queue; -+ -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, pqueue, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ host_handle); -+ if (ret >= 0) { -+ pqueue->handle = host_handle; -+ ret = hmgrtable_assign_handle(&process->handle_table, -+ NULL, -+ HMGRENTRY_TYPE_MONITOREDFENCE, -+ args.sync_object); -+ if (ret >= 0) -+ pqueue->syncobj_handle = args.sync_object; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ /* should not fail after this */ -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (pqueue) -+ dxgpagingqueue_destroy(pqueue); -+ if (host_handle.v) -+ dxgvmb_send_destroy_paging_queue(process, -+ adapter, -+ host_handle); -+ } -+ -+ if (adapter) -+ 
dxgadapter_release_lock_shared(adapter); -+ -+ if (device) { -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_destroy_paging_queue(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dddi_destroypagingqueue args; -+ struct dxgpagingqueue *paging_queue = NULL; -+ int ret; -+ struct d3dkmthandle device_handle = {}; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ paging_queue = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.paging_queue); -+ if (paging_queue) { -+ device_handle = paging_queue->device_handle; -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.paging_queue); -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_MONITOREDFENCE, -+ paging_queue->syncobj_handle); -+ paging_queue->syncobj_handle.v = 0; -+ paging_queue->handle.v = 0; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. 
-+ */ -+ if (device_handle.v) -+ device = dxgprocess_device_by_handle(process, device_handle); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_destroy_paging_queue(process, adapter, -+ args.paging_queue); -+ -+ dxgpagingqueue_destroy(paging_queue); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) { -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - get_standard_alloc_priv_data(struct dxgdevice *device, - struct d3dkmt_createstandardallocation *alloc_info, -@@ -1272,7 +1449,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - args.private_runtime_resource_handle; - if (args.flags.create_shared) { - if (!args.flags.nt_security_sharing) { -- dev_err(DXGDEV, -+ DXG_ERR( - "nt_security_sharing must be set"); - ret = -EINVAL; - goto cleanup; -@@ -1313,7 +1490,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - args.private_runtime_data, - args.private_runtime_data_size); - if (ret) { -- dev_err(DXGDEV, -+ DXG_ERR( - "failed to copy runtime data"); - ret = -EINVAL; - goto cleanup; -@@ -1333,7 +1510,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - args.priv_drv_data, - args.priv_drv_data_size); - if (ret) { -- dev_err(DXGDEV, -+ DXG_ERR( - "failed to copy res data"); - ret = -EINVAL; - goto cleanup; -@@ -3481,7 +3658,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x04 */ {dxgkio_create_context_virtual, LX_DXCREATECONTEXTVIRTUAL}, - /* 0x05 */ 
{dxgkio_destroy_context, LX_DXDESTROYCONTEXT}, - /* 0x06 */ {dxgkio_create_allocation, LX_DXCREATEALLOCATION}, --/* 0x07 */ {}, -+/* 0x07 */ {dxgkio_create_paging_queue, LX_DXCREATEPAGINGQUEUE}, - /* 0x08 */ {}, - /* 0x09 */ {dxgkio_query_adapter_info, LX_DXQUERYADAPTERINFO}, - /* 0x0a */ {}, -@@ -3502,7 +3679,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x19 */ {dxgkio_destroy_device, LX_DXDESTROYDEVICE}, - /* 0x1a */ {}, - /* 0x1b */ {dxgkio_destroy_hwqueue, LX_DXDESTROYHWQUEUE}, --/* 0x1c */ {}, -+/* 0x1c */ {dxgkio_destroy_paging_queue, LX_DXDESTROYPAGINGQUEUE}, - /* 0x1d */ {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT}, - /* 0x1e */ {}, - /* 0x1f */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -211,6 +211,29 @@ struct d3dddi_createhwqueueflags { - }; - }; - -+enum d3dddi_pagingqueue_priority { -+ _D3DDDI_PAGINGQUEUE_PRIORITY_BELOW_NORMAL = -1, -+ _D3DDDI_PAGINGQUEUE_PRIORITY_NORMAL = 0, -+ _D3DDDI_PAGINGQUEUE_PRIORITY_ABOVE_NORMAL = 1, -+}; -+ -+struct d3dkmt_createpagingqueue { -+ struct d3dkmthandle device; -+ enum d3dddi_pagingqueue_priority priority; -+ struct d3dkmthandle paging_queue; -+ struct d3dkmthandle sync_object; -+#ifdef __KERNEL__ -+ void *fence_cpu_virtual_address; -+#else -+ __u64 fence_cpu_virtual_address; -+#endif -+ __u32 physical_adapter_index; -+}; -+ -+struct d3dddi_destroypagingqueue { -+ struct d3dkmthandle paging_queue; -+}; -+ - enum d3dkmdt_gdisurfacetype { - _D3DKMDT_GDISURFACE_INVALID = 0, - _D3DKMDT_GDISURFACE_TEXTURE = 1, -@@ -890,6 +913,8 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x05, struct d3dkmt_destroycontext) - #define LX_DXCREATEALLOCATION \ - _IOWR(0x47, 0x06, struct d3dkmt_createallocation) -+#define LX_DXCREATEPAGINGQUEUE \ -+ _IOWR(0x47, 0x07, struct d3dkmt_createpagingqueue) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct 
d3dkmt_queryadapterinfo) - #define LX_DXCREATESYNCHRONIZATIONOBJECT \ -@@ -908,6 +933,8 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x18, struct d3dkmt_createhwqueue) - #define LX_DXDESTROYHWQUEUE \ - _IOWR(0x47, 0x1b, struct d3dkmt_destroyhwqueue) -+#define LX_DXDESTROYPAGINGQUEUE \ -+ _IOWR(0x47, 0x1c, struct d3dddi_destroypagingqueue) - #define LX_DXDESTROYDEVICE \ - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1682-drivers-hv-dxgkrnl-Submit-execution-commands-to-the-compute-device.patch b/patch/kernel/archive/wsl2-arm64-6.1/1682-drivers-hv-dxgkrnl-Submit-execution-commands-to-the-compute-device.patch deleted file mode 100644 index a2bc7bd7880a..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1682-drivers-hv-dxgkrnl-Submit-execution-commands-to-the-compute-device.patch +++ /dev/null @@ -1,450 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 19 Jan 2022 18:02:09 -0800 -Subject: drivers: hv: dxgkrnl: Submit execution commands to the compute device - -Implements ioctls for submission of compute device buffers for execution: - - LX_DXSUBMITCOMMAND - The ioctl is used to submit a command buffer to the device, - working in the "packet scheduling" mode. - - - LX_DXSUBMITCOMMANDTOHWQUEUE - The ioctl is used to submit a command buffer to the device, - working in the "hardware scheduling" mode. - -To improve performance both ioctls use asynchronous VM bus messages -to communicate with the host as these are high frequency operations. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 6 + - drivers/hv/dxgkrnl/dxgvmbus.c | 113 +++++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 14 + - drivers/hv/dxgkrnl/ioctl.c | 127 +++++++++- - include/uapi/misc/d3dkmthk.h | 58 +++++ - 5 files changed, 316 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -796,6 +796,9 @@ int dxgvmb_send_create_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - int dxgvmb_send_destroy_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - struct d3dkmt_destroyallocation2 *args, - struct d3dkmthandle *alloc_handles); -+int dxgvmb_send_submit_command(struct dxgprocess *pr, -+ struct dxgadapter *adapter, -+ struct d3dkmt_submitcommand *args); - int dxgvmb_send_create_sync_object(struct dxgprocess *pr, - struct dxgadapter *adapter, - struct d3dkmt_createsynchronizationobject2 -@@ -838,6 +841,9 @@ int dxgvmb_send_destroy_hwqueue(struct dxgprocess *process, - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -+int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_submitcommandtohwqueue *a); - int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, - struct dxgvmbuschannel *channel, - struct d3dkmt_opensyncobjectfromnthandle2 -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1901,6 +1901,61 @@ int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - return ret; - } - -+int dxgvmb_send_submit_command(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct 
d3dkmt_submitcommand *args) -+{ -+ int ret; -+ u32 cmd_size; -+ struct dxgkvmb_command_submitcommand *command; -+ u32 hbufsize = args->num_history_buffers * sizeof(struct d3dkmthandle); -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ cmd_size = sizeof(struct dxgkvmb_command_submitcommand) + -+ hbufsize + args->priv_drv_data_size; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ ret = copy_from_user(&command[1], args->history_buffer_array, -+ hbufsize); -+ if (ret) { -+ DXG_ERR(" failed to copy history buffer"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_from_user((u8 *) &command[1] + hbufsize, -+ args->priv_drv_data, args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy history priv data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_SUBMITCOMMAND, -+ process->host_handle); -+ command->args = *args; -+ -+ if (dxgglobal->async_msg_enabled) { -+ command->hdr.async_msg = 1; -+ ret = dxgvmb_send_async_msg(msg.channel, msg.hdr, msg.size); -+ } else { -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, -+ msg.size); -+ } -+ -+cleanup: -+ -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - static void set_result(struct d3dkmt_createsynchronizationobject2 *args, - u64 fence_gpu_va, u8 *va) - { -@@ -2427,3 +2482,61 @@ int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - DXG_TRACE("err: %d", ret); - return ret; - } -+ -+int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_submitcommandtohwqueue -+ *args) -+{ -+ int ret = -EINVAL; -+ u32 cmd_size; -+ struct dxgkvmb_command_submitcommandtohwqueue *command; -+ u32 primaries_size = args->num_primaries * sizeof(struct d3dkmthandle); -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ struct dxgglobal 
*dxgglobal = dxggbl(); -+ -+ cmd_size = sizeof(*command) + args->priv_drv_data_size + primaries_size; -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ if (primaries_size) { -+ ret = copy_from_user(&command[1], args->written_primaries, -+ primaries_size); -+ if (ret) { -+ DXG_ERR("failed to copy primaries handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ if (args->priv_drv_data_size) { -+ ret = copy_from_user((char *)&command[1] + primaries_size, -+ args->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy primaries data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_SUBMITCOMMANDTOHWQUEUE, -+ process->host_handle); -+ command->args = *args; -+ -+ if (dxgglobal->async_msg_enabled) { -+ command->hdr.async_msg = 1; -+ ret = dxgvmb_send_async_msg(msg.channel, msg.hdr, msg.size); -+ } else { -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, -+ msg.size); -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -314,6 +314,20 @@ struct dxgkvmb_command_flushdevice { - enum dxgdevice_flushschedulerreason reason; - }; - -+struct dxgkvmb_command_submitcommand { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_submitcommand args; -+ /* HistoryBufferHandles */ -+ /* PrivateDriverData */ -+}; -+ -+struct dxgkvmb_command_submitcommandtohwqueue { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_submitcommandtohwqueue args; -+ /* Written primaries */ -+ /* PrivateDriverData */ -+}; -+ - struct dxgkvmb_command_createallocation_allocinfo { - u32 flags; - u32 priv_drv_data_size; -diff --git a/drivers/hv/dxgkrnl/ioctl.c 
b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -1902,6 +1902,129 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_submit_command(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_submitcommand args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.broadcast_context_count > D3DDDI_MAX_BROADCAST_CONTEXT || -+ args.broadcast_context_count == 0) { -+ DXG_ERR("invalid number of contexts"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("invalid private data size"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.num_history_buffers > 1024) { -+ DXG_ERR("invalid number of history buffers"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.num_primaries > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("invalid number of primaries"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.broadcast_context[0]); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_submit_command(process, adapter, &args); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_submit_command_to_hwqueue(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct 
d3dkmt_submitcommandtohwqueue args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("invalid private data size"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.num_primaries > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("invalid number of primaries"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ args.hwqueue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_submit_command_hwqueue(process, adapter, &args); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_submit_signal_to_hwqueue(struct dxgprocess *process, void *__user inargs) - { -@@ -3666,7 +3789,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x0c */ {}, - /* 0x0d */ {}, - /* 0x0e */ {}, --/* 0x0f */ {}, -+/* 0x0f */ {dxgkio_submit_command, LX_DXSUBMITCOMMAND}, - /* 0x10 */ {dxgkio_create_sync_object, LX_DXCREATESYNCHRONIZATIONOBJECT}, - /* 0x11 */ {dxgkio_signal_sync_object, LX_DXSIGNALSYNCHRONIZATIONOBJECT}, - /* 0x12 */ {dxgkio_wait_sync_object, LX_DXWAITFORSYNCHRONIZATIONOBJECT}, -@@ -3706,7 +3829,7 @@ static struct ioctl_desc ioctls[] = { - LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU}, - /* 0x33 */ {dxgkio_signal_sync_object_gpu2, - LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2}, --/* 0x34 */ {}, -+/* 0x34 */ {dxgkio_submit_command_to_hwqueue, LX_DXSUBMITCOMMANDTOHWQUEUE}, - /* 0x35 */ 
{dxgkio_submit_signal_to_hwqueue, - LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE}, - /* 0x36 */ {dxgkio_submit_wait_to_hwqueue, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -58,6 +58,8 @@ struct winluid { - __u32 b; - }; - -+#define D3DDDI_MAX_WRITTEN_PRIMARIES 16 -+ - #define D3DKMT_CREATEALLOCATION_MAX 1024 - #define D3DKMT_ADAPTERS_MAX 64 - #define D3DDDI_MAX_BROADCAST_CONTEXT 64 -@@ -525,6 +527,58 @@ struct d3dkmt_destroysynchronizationobject { - struct d3dkmthandle sync_object; - }; - -+struct d3dkmt_submitcommandflags { -+ __u32 null_rendering:1; -+ __u32 present_redirected:1; -+ __u32 reserved:30; -+}; -+ -+struct d3dkmt_submitcommand { -+ __u64 command_buffer; -+ __u32 command_length; -+ struct d3dkmt_submitcommandflags flags; -+ __u64 present_history_token; -+ __u32 broadcast_context_count; -+ struct d3dkmthandle broadcast_context[D3DDDI_MAX_BROADCAST_CONTEXT]; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ __u32 priv_drv_data_size; -+ __u32 num_primaries; -+ struct d3dkmthandle written_primaries[D3DDDI_MAX_WRITTEN_PRIMARIES]; -+ __u32 num_history_buffers; -+ __u32 reserved1; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *history_buffer_array; -+#else -+ __u64 history_buffer_array; -+#endif -+}; -+ -+struct d3dkmt_submitcommandtohwqueue { -+ struct d3dkmthandle hwqueue; -+ __u32 reserved; -+ __u64 hwqueue_progress_fence_id; -+ __u64 command_buffer; -+ __u32 command_length; -+ __u32 priv_drv_data_size; -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ __u32 num_primaries; -+ __u32 reserved1; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *written_primaries; -+#else -+ __u64 written_primaries; -+#endif -+}; -+ - enum d3dkmt_standardallocationtype { - _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP = 1, - 
_D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER = 2, -@@ -917,6 +971,8 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x07, struct d3dkmt_createpagingqueue) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) -+#define LX_DXSUBMITCOMMAND \ -+ _IOWR(0x47, 0x0f, struct d3dkmt_submitcommand) - #define LX_DXCREATESYNCHRONIZATIONOBJECT \ - _IOWR(0x47, 0x10, struct d3dkmt_createsynchronizationobject2) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECT \ -@@ -945,6 +1001,8 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x32, struct d3dkmt_signalsynchronizationobjectfromgpu) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2 \ - _IOWR(0x47, 0x33, struct d3dkmt_signalsynchronizationobjectfromgpu2) -+#define LX_DXSUBMITCOMMANDTOHWQUEUE \ -+ _IOWR(0x47, 0x34, struct d3dkmt_submitcommandtohwqueue) - #define LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE \ - _IOWR(0x47, 0x35, struct d3dkmt_submitsignalsyncobjectstohwqueue) - #define LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1683-drivers-hv-dxgkrnl-Share-objects-with-the-host.patch b/patch/kernel/archive/wsl2-arm64-6.1/1683-drivers-hv-dxgkrnl-Share-objects-with-the-host.patch deleted file mode 100644 index 42736ff7800f..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1683-drivers-hv-dxgkrnl-Share-objects-with-the-host.patch +++ /dev/null @@ -1,271 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Sat, 7 Aug 2021 18:11:34 -0700 -Subject: drivers: hv: dxgkrnl: Share objects with the host - -Implement the LX_DXSHAREOBJECTWITHHOST ioctl. -This ioctl is used to create a Windows NT handle on the host -for the given shared object (resource or sync object). The NT -handle is returned to the caller. The caller could share the NT -handle with a host application, which needs to access the object. -The host application can open the shared resource using the NT -handle. 
This way the guest and the host have access to the same -object. - -Fix incorrect handling of error results from copy_from_user(). - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 2 + - drivers/hv/dxgkrnl/dxgvmbus.c | 60 +++++++++- - drivers/hv/dxgkrnl/dxgvmbus.h | 18 +++ - drivers/hv/dxgkrnl/ioctl.c | 38 +++++- - include/uapi/misc/d3dkmthk.h | 9 ++ - 5 files changed, 120 insertions(+), 7 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -872,6 +872,8 @@ int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel, - void *command, - u32 cmd_size); -+int dxgvmb_send_share_object_with_host(struct dxgprocess *process, -+ struct d3dkmt_shareobjectwithhost *args); - - void signal_host_cpu_event(struct dxghostevent *eventhdr); - int ntstatus2int(struct ntstatus status); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -881,6 +881,50 @@ int dxgvmb_send_destroy_sync_object(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_share_object_with_host(struct dxgprocess *process, -+ struct d3dkmt_shareobjectwithhost *args) -+{ -+ struct dxgkvmb_command_shareobjectwithhost *command; -+ struct dxgkvmb_command_shareobjectwithhost_return result = {}; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, NULL, process, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ command_vm_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_SHAREOBJECTWITHHOST, -+ process->host_handle); -+ 
command->device_handle = args->device_handle; -+ command->object_handle = args->object_handle; -+ -+ ret = dxgvmb_send_sync_msg(dxgglobal_get_dxgvmbuschannel(), -+ msg.hdr, msg.size, &result, sizeof(result)); -+ -+ dxgglobal_release_channel_lock(); -+ -+ if (ret || !NT_SUCCESS(result.status)) { -+ if (ret == 0) -+ ret = ntstatus2int(result.status); -+ DXG_ERR("Host failed to share object with host: %d %x", -+ ret, result.status.v); -+ goto cleanup; -+ } -+ args->object_vail_nt_handle = result.vail_nt_handle; -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_ERR("err: %d", ret); -+ return ret; -+} -+ - /* - * Virtual GPU messages to the host - */ -@@ -2323,37 +2367,43 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - - ret = copy_to_user(&inargs->queue, &command->hwqueue, - sizeof(struct d3dkmthandle)); -- if (ret < 0) { -+ if (ret) { - DXG_ERR("failed to copy hwqueue handle"); -+ ret = -EINVAL; - goto cleanup; - } - ret = copy_to_user(&inargs->queue_progress_fence, - &command->hwqueue_progress_fence, - sizeof(struct d3dkmthandle)); -- if (ret < 0) { -+ if (ret) { - DXG_ERR("failed to progress fence"); -+ ret = -EINVAL; - goto cleanup; - } - ret = copy_to_user(&inargs->queue_progress_fence_cpu_va, - &hwqueue->progress_fence_mapped_address, - sizeof(inargs->queue_progress_fence_cpu_va)); -- if (ret < 0) { -+ if (ret) { - DXG_ERR("failed to copy fence cpu va"); -+ ret = -EINVAL; - goto cleanup; - } - ret = copy_to_user(&inargs->queue_progress_fence_gpu_va, - &command->hwqueue_progress_fence_gpuva, - sizeof(u64)); -- if (ret < 0) { -+ if (ret) { - DXG_ERR("failed to copy fence gpu va"); -+ ret = -EINVAL; - goto cleanup; - } - if (args->priv_drv_data_size) { - ret = copy_to_user(args->priv_drv_data, - command->priv_drv_data, - args->priv_drv_data_size); -- if (ret < 0) -+ if (ret) { - DXG_ERR("failed to copy private data"); -+ ret = -EINVAL; -+ } - } - - cleanup: -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h 
-index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -574,4 +574,22 @@ struct dxgkvmb_command_destroyhwqueue { - struct d3dkmthandle hwqueue; - }; - -+struct dxgkvmb_command_shareobjectwithhost { -+ struct dxgkvmb_command_vm_to_host hdr; -+ struct d3dkmthandle device_handle; -+ struct d3dkmthandle object_handle; -+ u64 reserved; -+}; -+ -+struct dxgkvmb_command_shareobjectwithhost_return { -+ struct ntstatus status; -+ u32 alignment; -+ u64 vail_nt_handle; -+}; -+ -+int -+dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel, -+ void *command, u32 command_size, void *result, -+ u32 result_size); -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -2460,6 +2460,7 @@ dxgkio_open_sync_object_nt(struct dxgprocess *process, void *__user inargs) - if (ret == 0) - goto success; - DXG_ERR("failed to copy output args"); -+ ret = -EINVAL; - - cleanup: - -@@ -3364,8 +3365,10 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - tmp = (u64) object_fd; - - ret = copy_to_user(args.shared_handle, &tmp, sizeof(u64)); -- if (ret < 0) -+ if (ret) { - DXG_ERR("failed to copy shared handle"); -+ ret = -EINVAL; -+ } - - cleanup: - if (ret < 0) { -@@ -3773,6 +3776,37 @@ dxgkio_open_resource_nt(struct dxgprocess *process, - return ret; - } - -+static int -+dxgkio_share_object_with_host(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_shareobjectwithhost args; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_share_object_with_host(process, &args); -+ if (ret) { -+ DXG_ERR("dxgvmb_send_share_object_with_host dailed"); -+ goto cleanup; -+ } -+ -+ ret = copy_to_user(inargs, &args, 
sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy data to user"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, -@@ -3850,7 +3884,7 @@ static struct ioctl_desc ioctls[] = { - LX_DXQUERYRESOURCEINFOFROMNTHANDLE}, - /* 0x42 */ {dxgkio_open_resource_nt, LX_DXOPENRESOURCEFROMNTHANDLE}, - /* 0x43 */ {}, --/* 0x44 */ {}, -+/* 0x44 */ {dxgkio_share_object_with_host, LX_DXSHAREOBJECTWITHHOST}, - /* 0x45 */ {}, - }; - -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -952,6 +952,13 @@ struct d3dkmt_enumadapters3 { - #endif - }; - -+struct d3dkmt_shareobjectwithhost { -+ struct d3dkmthandle device_handle; -+ struct d3dkmthandle object_handle; -+ __u64 reserved; -+ __u64 object_vail_nt_handle; -+}; -+ - /* - * Dxgkrnl Graphics Port Driver ioctl definitions - * -@@ -1021,5 +1028,7 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x41, struct d3dkmt_queryresourceinfofromnthandle) - #define LX_DXOPENRESOURCEFROMNTHANDLE \ - _IOWR(0x47, 0x42, struct d3dkmt_openresourcefromnthandle) -+#define LX_DXSHAREOBJECTWITHHOST \ -+ _IOWR(0x47, 0x44, struct d3dkmt_shareobjectwithhost) - - #endif /* _D3DKMTHK_H */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1684-drivers-hv-dxgkrnl-Query-the-dxgdevice-state.patch b/patch/kernel/archive/wsl2-arm64-6.1/1684-drivers-hv-dxgkrnl-Query-the-dxgdevice-state.patch deleted file mode 100644 index 1b63bcdf315b..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1684-drivers-hv-dxgkrnl-Query-the-dxgdevice-state.patch +++ /dev/null @@ -1,454 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 19 Jan 2022 16:53:47 -0800 -Subject: drivers: 
hv: dxgkrnl: Query the dxgdevice state - -Implement the ioctl to query the dxgdevice state - LX_DXGETDEVICESTATE. -The IOCTL is used to query the state of the given dxgdevice object (active, -error, etc.). - -A call to the dxgdevice execution state could be high frequency. -The following method is used to avoid sending a synchronous VM -bus message to the host for every call: -- When a dxgdevice is created, a pointer to dxgglobal->device_state_counter - is sent to the host -- Every time the device state on the host is changed, the host will send - an asynchronous message to the guest (DXGK_VMBCOMMAND_SETGUESTDATA) and - the guest will increment the device_state_counter value. -- the dxgdevice object has execution_state_counter member, which is equal - to dxgglobal->device_state_counter value at the time when - LX_DXGETDEVICESTATE was last processed.. -- if execution_state_counter is different from device_state_counter, the - dxgk_vmbcommand_getdevicestate VM bus message is sent to the host. - Otherwise, the cached value is returned to the caller. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 11 + - drivers/hv/dxgkrnl/dxgmodule.c | 1 - - drivers/hv/dxgkrnl/dxgvmbus.c | 68 +++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 26 +++ - drivers/hv/dxgkrnl/ioctl.c | 66 +++++- - include/uapi/misc/d3dkmthk.h | 101 +++++++++- - 6 files changed, 261 insertions(+), 12 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -268,12 +268,18 @@ void dxgsyncobject_destroy(struct dxgprocess *process, - void dxgsyncobject_stop(struct dxgsyncobject *syncobj); - void dxgsyncobject_release(struct kref *refcount); - -+/* -+ * device_state_counter - incremented every time the execition state of -+ * a DXGDEVICE is changed in the host. 
Used to optimize access to the -+ * device execution state. -+ */ - struct dxgglobal { - struct dxgdriver *drvdata; - struct dxgvmbuschannel channel; - struct hv_device *hdev; - u32 num_adapters; - u32 vmbus_ver; /* Interface version */ -+ atomic_t device_state_counter; - struct resource *mem; - u64 mmiospace_base; - u64 mmiospace_size; -@@ -512,6 +518,7 @@ struct dxgdevice { - struct list_head syncobj_list_head; - struct d3dkmthandle handle; - enum d3dkmt_deviceexecution_state execution_state; -+ int execution_state_counter; - u32 handle_valid; - }; - -@@ -849,6 +856,10 @@ int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, - struct d3dkmt_opensyncobjectfromnthandle2 - *args, - struct dxgsyncobject *syncobj); -+int dxgvmb_send_get_device_state(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_getdevicestate *args, -+ struct d3dkmt_getdevicestate *__user inargs); - int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process, - struct d3dkmthandle object, - struct d3dkmthandle *shared_handle); -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -827,7 +827,6 @@ static struct dxgglobal *dxgglobal_create(void) - #ifdef DEBUG - dxgk_validate_ioctls(); - #endif -- - return dxgglobal; - } - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -281,6 +281,24 @@ static void command_vm_to_host_init1(struct dxgkvmb_command_vm_to_host *command, - command->channel_type = DXGKVMB_VM_TO_HOST; - } - -+static void set_guest_data(struct dxgkvmb_command_host_to_vm *packet, -+ u32 packet_length) -+{ -+ struct dxgkvmb_command_setguestdata *command = (void *)packet; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ DXG_TRACE("Setting guest data: %d %d %p %p", 
-+ command->data_type, -+ command->data32, -+ command->guest_pointer, -+ &dxgglobal->device_state_counter); -+ if (command->data_type == SETGUESTDATA_DATATYPE_DWORD && -+ command->guest_pointer == &dxgglobal->device_state_counter && -+ command->data32 != 0) { -+ atomic_inc(&dxgglobal->device_state_counter); -+ } -+} -+ - static void signal_guest_event(struct dxgkvmb_command_host_to_vm *packet, - u32 packet_length) - { -@@ -311,6 +329,9 @@ static void process_inband_packet(struct dxgvmbuschannel *channel, - DXG_TRACE("global packet %d", - packet->command_type); - switch (packet->command_type) { -+ case DXGK_VMBCOMMAND_SETGUESTDATA: -+ set_guest_data(packet, packet_length); -+ break; - case DXGK_VMBCOMMAND_SIGNALGUESTEVENT: - case DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE: - signal_guest_event(packet, packet_length); -@@ -1028,6 +1049,7 @@ struct d3dkmthandle dxgvmb_send_create_device(struct dxgadapter *adapter, - struct dxgkvmb_command_createdevice *command; - struct dxgkvmb_command_createdevice_return result = { }; - struct dxgvmbusmsg msg; -+ struct dxgglobal *dxgglobal = dxggbl(); - - ret = init_message(&msg, adapter, process, sizeof(*command)); - if (ret) -@@ -1037,6 +1059,7 @@ struct d3dkmthandle dxgvmb_send_create_device(struct dxgadapter *adapter, - command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_CREATEDEVICE, - process->host_handle); - command->flags = args->flags; -+ command->error_code = &dxgglobal->device_state_counter; - - ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, - &result, sizeof(result)); -@@ -1806,6 +1829,51 @@ int dxgvmb_send_destroy_allocation(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_get_device_state(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_getdevicestate *args, -+ struct d3dkmt_getdevicestate *__user output) -+{ -+ int ret; -+ struct dxgkvmb_command_getdevicestate *command; -+ struct dxgkvmb_command_getdevicestate_return result = { }; -+ struct dxgvmbusmsg msg = 
{.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_GETDEVICESTATE, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result.status); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(output, &result.args, sizeof(result.args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EINVAL; -+ } -+ -+ if (args->state_type == _D3DKMT_DEVICESTATE_EXECUTION) -+ args->execution_state = result.args.execution_state; -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_open_resource(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmthandle device, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -172,6 +172,22 @@ struct dxgkvmb_command_signalguestevent { - bool dereference_event; - }; - -+enum set_guestdata_type { -+ SETGUESTDATA_DATATYPE_DWORD = 0, -+ SETGUESTDATA_DATATYPE_UINT64 = 1 -+}; -+ -+struct dxgkvmb_command_setguestdata { -+ struct dxgkvmb_command_host_to_vm hdr; -+ void *guest_pointer; -+ union { -+ u64 data64; -+ u32 data32; -+ }; -+ u32 dereference : 1; -+ u32 data_type : 4; -+}; -+ - struct dxgkvmb_command_opensyncobject { - struct dxgkvmb_command_vm_to_host hdr; - struct d3dkmthandle device; -@@ -574,6 +590,16 @@ struct dxgkvmb_command_destroyhwqueue { - struct d3dkmthandle hwqueue; - }; - -+struct dxgkvmb_command_getdevicestate { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_getdevicestate args; -+}; -+ -+struct dxgkvmb_command_getdevicestate_return { -+ struct 
d3dkmt_getdevicestate args; -+ struct ntstatus status; -+}; -+ - struct dxgkvmb_command_shareobjectwithhost { - struct dxgkvmb_command_vm_to_host hdr; - struct d3dkmthandle device_handle; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3142,6 +3142,70 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_getdevicestate args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int global_device_state_counter = 0; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ if (args.state_type == _D3DKMT_DEVICESTATE_EXECUTION) { -+ global_device_state_counter = -+ atomic_read(&dxgglobal->device_state_counter); -+ if (device->execution_state_counter == -+ global_device_state_counter) { -+ args.execution_state = device->execution_state; -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy args to user"); -+ ret = -EINVAL; -+ } -+ goto cleanup; -+ } -+ } -+ -+ ret = dxgvmb_send_get_device_state(process, adapter, &args, inargs); -+ -+ if (ret == 0 && args.state_type == _D3DKMT_DEVICESTATE_EXECUTION) { -+ device->execution_state = args.execution_state; -+ device->execution_state_counter = global_device_state_counter; -+ } -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if 
(device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ if (ret < 0) -+ DXG_ERR("Failed to get device state %x", ret); -+ -+ return ret; -+} -+ - static int - dxgsharedsyncobj_get_host_nt_handle(struct dxgsharedsyncobject *syncobj, - struct dxgprocess *process, -@@ -3822,7 +3886,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x0b */ {}, - /* 0x0c */ {}, - /* 0x0d */ {}, --/* 0x0e */ {}, -+/* 0x0e */ {dxgkio_get_device_state, LX_DXGETDEVICESTATE}, - /* 0x0f */ {dxgkio_submit_command, LX_DXSUBMITCOMMAND}, - /* 0x10 */ {dxgkio_create_sync_object, LX_DXCREATESYNCHRONIZATIONOBJECT}, - /* 0x11 */ {dxgkio_signal_sync_object, LX_DXSIGNALSYNCHRONIZATIONOBJECT}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -236,6 +236,95 @@ struct d3dddi_destroypagingqueue { - struct d3dkmthandle paging_queue; - }; - -+enum dxgk_render_pipeline_stage { -+ _DXGK_RENDER_PIPELINE_STAGE_UNKNOWN = 0, -+ _DXGK_RENDER_PIPELINE_STAGE_INPUT_ASSEMBLER = 1, -+ _DXGK_RENDER_PIPELINE_STAGE_VERTEX_SHADER = 2, -+ _DXGK_RENDER_PIPELINE_STAGE_GEOMETRY_SHADER = 3, -+ _DXGK_RENDER_PIPELINE_STAGE_STREAM_OUTPUT = 4, -+ _DXGK_RENDER_PIPELINE_STAGE_RASTERIZER = 5, -+ _DXGK_RENDER_PIPELINE_STAGE_PIXEL_SHADER = 6, -+ _DXGK_RENDER_PIPELINE_STAGE_OUTPUT_MERGER = 7, -+}; -+ -+enum dxgk_page_fault_flags { -+ _DXGK_PAGE_FAULT_WRITE = 0x1, -+ _DXGK_PAGE_FAULT_FENCE_INVALID = 0x2, -+ _DXGK_PAGE_FAULT_ADAPTER_RESET_REQUIRED = 0x4, -+ _DXGK_PAGE_FAULT_ENGINE_RESET_REQUIRED = 0x8, -+ _DXGK_PAGE_FAULT_FATAL_HARDWARE_ERROR = 0x10, -+ _DXGK_PAGE_FAULT_IOMMU = 0x20, -+ _DXGK_PAGE_FAULT_HW_CONTEXT_VALID = 0x40, -+ _DXGK_PAGE_FAULT_PROCESS_HANDLE_VALID = 0x80, -+}; -+ -+enum dxgk_general_error_code { -+ _DXGK_GENERAL_ERROR_PAGE_FAULT = 0, -+ _DXGK_GENERAL_ERROR_INVALID_INSTRUCTION = 1, -+}; -+ -+struct dxgk_fault_error_code { -+ union { -+ struct { -+ __u32 
is_device_specific_code:1; -+ enum dxgk_general_error_code general_error_code:31; -+ }; -+ struct { -+ __u32 is_device_specific_code_reserved_bit:1; -+ __u32 device_specific_code:31; -+ }; -+ }; -+}; -+ -+struct d3dkmt_devicereset_state { -+ union { -+ struct { -+ __u32 desktop_switched:1; -+ __u32 reserved:31; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_devicepagefault_state { -+ __u64 faulted_primitive_api_sequence_number; -+ enum dxgk_render_pipeline_stage faulted_pipeline_stage; -+ __u32 faulted_bind_table_entry; -+ enum dxgk_page_fault_flags page_fault_flags; -+ struct dxgk_fault_error_code fault_error_code; -+ __u64 faulted_virtual_address; -+}; -+ -+enum d3dkmt_deviceexecution_state { -+ _D3DKMT_DEVICEEXECUTION_ACTIVE = 1, -+ _D3DKMT_DEVICEEXECUTION_RESET = 2, -+ _D3DKMT_DEVICEEXECUTION_HUNG = 3, -+ _D3DKMT_DEVICEEXECUTION_STOPPED = 4, -+ _D3DKMT_DEVICEEXECUTION_ERROR_OUTOFMEMORY = 5, -+ _D3DKMT_DEVICEEXECUTION_ERROR_DMAFAULT = 6, -+ _D3DKMT_DEVICEEXECUTION_ERROR_DMAPAGEFAULT = 7, -+}; -+ -+enum d3dkmt_devicestate_type { -+ _D3DKMT_DEVICESTATE_EXECUTION = 1, -+ _D3DKMT_DEVICESTATE_PRESENT = 2, -+ _D3DKMT_DEVICESTATE_RESET = 3, -+ _D3DKMT_DEVICESTATE_PRESENT_DWM = 4, -+ _D3DKMT_DEVICESTATE_PAGE_FAULT = 5, -+ _D3DKMT_DEVICESTATE_PRESENT_QUEUE = 6, -+}; -+ -+struct d3dkmt_getdevicestate { -+ struct d3dkmthandle device; -+ enum d3dkmt_devicestate_type state_type; -+ union { -+ enum d3dkmt_deviceexecution_state execution_state; -+ struct d3dkmt_devicereset_state reset_state; -+ struct d3dkmt_devicepagefault_state page_fault_state; -+ char alignment[48]; -+ }; -+}; -+ - enum d3dkmdt_gdisurfacetype { - _D3DKMDT_GDISURFACE_INVALID = 0, - _D3DKMDT_GDISURFACE_TEXTURE = 1, -@@ -759,16 +848,6 @@ struct d3dkmt_queryadapterinfo { - __u32 private_data_size; - }; - --enum d3dkmt_deviceexecution_state { -- _D3DKMT_DEVICEEXECUTION_ACTIVE = 1, -- _D3DKMT_DEVICEEXECUTION_RESET = 2, -- _D3DKMT_DEVICEEXECUTION_HUNG = 3, -- _D3DKMT_DEVICEEXECUTION_STOPPED = 4, -- 
_D3DKMT_DEVICEEXECUTION_ERROR_OUTOFMEMORY = 5, -- _D3DKMT_DEVICEEXECUTION_ERROR_DMAFAULT = 6, -- _D3DKMT_DEVICEEXECUTION_ERROR_DMAPAGEFAULT = 7, --}; -- - struct d3dddi_openallocationinfo2 { - struct d3dkmthandle allocation; - #ifdef __KERNEL__ -@@ -978,6 +1057,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x07, struct d3dkmt_createpagingqueue) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) -+#define LX_DXGETDEVICESTATE \ -+ _IOWR(0x47, 0x0e, struct d3dkmt_getdevicestate) - #define LX_DXSUBMITCOMMAND \ - _IOWR(0x47, 0x0f, struct d3dkmt_submitcommand) - #define LX_DXCREATESYNCHRONIZATIONOBJECT \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1685-drivers-hv-dxgkrnl-Map-unmap-CPU-address-to-device-allocation.patch b/patch/kernel/archive/wsl2-arm64-6.1/1685-drivers-hv-dxgkrnl-Map-unmap-CPU-address-to-device-allocation.patch deleted file mode 100644 index 8b3e0ee42808..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1685-drivers-hv-dxgkrnl-Map-unmap-CPU-address-to-device-allocation.patch +++ /dev/null @@ -1,498 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 19 Jan 2022 13:58:28 -0800 -Subject: drivers: hv: dxgkrnl: Map(unmap) CPU address to device allocation - -Implement ioctls to map/unmap CPU virtual addresses to compute device -allocations - LX_DXLOCK2 and LX_DXUNLOCK2. - -The LX_DXLOCK2 ioctl maps a CPU virtual address to a compute device -allocation. The allocation could be located in system memory or local -device memory on the host. When the device allocation is created -from the guest system memory (existing sysmem allocation), the -allocation CPU address is known and is returned to the caller. -For other CPU visible allocations the code flow is the following: -1. A VM bus message is sent to the host to map the allocation -2. The host allocates a portion of the guest IO space and maps it - to the allocation backing store. 
The IO space address of the - allocation is returned back to the guest. -3. The guest allocates a CPU virtual address and maps it to the IO - space (see the dxg_map_iospace function). -4. The CPU VA is returned back to the caller -cpu_address_mapped and cpu_address_refcount are used to track how -many times an allocation was mapped. - -The LX_DXUNLOCK2 ioctl unmaps a CPU virtual address from a compute -device allocation. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 11 + - drivers/hv/dxgkrnl/dxgkrnl.h | 14 + - drivers/hv/dxgkrnl/dxgvmbus.c | 107 +++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 19 ++ - drivers/hv/dxgkrnl/ioctl.c | 160 +++++++++- - include/uapi/misc/d3dkmthk.h | 30 ++ - 6 files changed, 339 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -885,6 +885,15 @@ void dxgallocation_stop(struct dxgallocation *alloc) - vfree(alloc->pages); - alloc->pages = NULL; - } -+ dxgprocess_ht_lock_exclusive_down(alloc->process); -+ if (alloc->cpu_address_mapped) { -+ dxg_unmap_iospace(alloc->cpu_address, -+ alloc->num_pages << PAGE_SHIFT); -+ alloc->cpu_address_mapped = false; -+ alloc->cpu_address = NULL; -+ alloc->cpu_address_refcount = 0; -+ } -+ dxgprocess_ht_lock_exclusive_up(alloc->process); - } - - void dxgallocation_free_handle(struct dxgallocation *alloc) -@@ -932,6 +941,8 @@ else - #endif - if (alloc->priv_drv_data) - vfree(alloc->priv_drv_data); -+ if (alloc->cpu_address_mapped) -+ pr_err("Alloc IO space is mapped: %p", alloc); - kfree(alloc); - } - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -708,6 +708,8 @@ struct dxgallocation { - struct d3dkmthandle 
alloc_handle; - /* Set to 1 when allocation belongs to resource. */ - u32 resource_owner:1; -+ /* Set to 1 when 'cpu_address' is mapped to the IO space. */ -+ u32 cpu_address_mapped:1; - /* Set to 1 when the allocatio is mapped as cached */ - u32 cached:1; - u32 handle_valid:1; -@@ -719,6 +721,11 @@ struct dxgallocation { - #endif - /* Number of pages in the 'pages' array */ - u32 num_pages; -+ /* -+ * How many times dxgk_lock2 is called to allocation, which is mapped -+ * to IO space. -+ */ -+ u32 cpu_address_refcount; - /* - * CPU address from the existing sysmem allocation, or - * mapped to the CPU visible backing store in the IO space -@@ -837,6 +844,13 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - d3dkmt_waitforsynchronizationobjectfromcpu - *args, - u64 cpu_event); -+int dxgvmb_send_lock2(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_lock2 *args, -+ struct d3dkmt_lock2 *__user outargs); -+int dxgvmb_send_unlock2(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_unlock2 *args); - int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_createhwqueue *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -2354,6 +2354,113 @@ int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_lock2(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_lock2 *args, -+ struct d3dkmt_lock2 *__user outargs) -+{ -+ int ret; -+ struct dxgkvmb_command_lock2 *command; -+ struct dxgkvmb_command_lock2_return result = { }; -+ struct dxgallocation *alloc = NULL; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ 
command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_LOCK2, process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result.status); -+ if (ret < 0) -+ goto cleanup; -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ alloc = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ args->allocation); -+ if (alloc == NULL) { -+ DXG_ERR("invalid alloc"); -+ ret = -EINVAL; -+ } else { -+ if (alloc->cpu_address) { -+ args->data = alloc->cpu_address; -+ if (alloc->cpu_address_mapped) -+ alloc->cpu_address_refcount++; -+ } else { -+ u64 offset = (u64)result.cpu_visible_buffer_offset; -+ -+ args->data = dxg_map_iospace(offset, -+ alloc->num_pages << PAGE_SHIFT, -+ PROT_READ | PROT_WRITE, alloc->cached); -+ if (args->data) { -+ alloc->cpu_address_refcount = 1; -+ alloc->cpu_address_mapped = true; -+ alloc->cpu_address = args->data; -+ } -+ } -+ if (args->data == NULL) { -+ ret = -ENOMEM; -+ } else { -+ ret = copy_to_user(&outargs->data, &args->data, -+ sizeof(args->data)); -+ if (ret) { -+ DXG_ERR("failed to copy data"); -+ ret = -EINVAL; -+ alloc->cpu_address_refcount--; -+ if (alloc->cpu_address_refcount == 0) { -+ dxg_unmap_iospace(alloc->cpu_address, -+ alloc->num_pages << PAGE_SHIFT); -+ alloc->cpu_address_mapped = false; -+ alloc->cpu_address = NULL; -+ } -+ } -+ } -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_unlock2(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_unlock2 *args) -+{ -+ int ret; -+ struct dxgkvmb_command_unlock2 *command; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; 
-+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_UNLOCK2, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_createhwqueue *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -570,6 +570,25 @@ struct dxgkvmb_command_waitforsyncobjectfromgpu { - /* struct d3dkmthandle ObjectHandles[object_count] */ - }; - -+struct dxgkvmb_command_lock2 { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_lock2 args; -+ bool use_legacy_lock; -+ u32 flags; -+ u32 priv_drv_data; -+}; -+ -+struct dxgkvmb_command_lock2_return { -+ struct ntstatus status; -+ void *cpu_visible_buffer_offset; -+}; -+ -+struct dxgkvmb_command_unlock2 { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_unlock2 args; -+ bool use_legacy_unlock; -+}; -+ - /* Returns the same structure */ - struct dxgkvmb_command_createhwqueue { - struct dxgkvmb_command_vgpu_to_host hdr; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3142,6 +3142,162 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_lock2(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_lock2 args; -+ struct d3dkmt_lock2 *__user result = inargs; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgallocation *alloc = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy 
input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ args.data = NULL; -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ alloc = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ args.allocation); -+ if (alloc == NULL) { -+ ret = -EINVAL; -+ } else { -+ if (alloc->cpu_address) { -+ ret = copy_to_user(&result->data, -+ &alloc->cpu_address, -+ sizeof(args.data)); -+ if (ret == 0) { -+ args.data = alloc->cpu_address; -+ if (alloc->cpu_address_mapped) -+ alloc->cpu_address_refcount++; -+ } else { -+ DXG_ERR("Failed to copy cpu address"); -+ ret = -EINVAL; -+ } -+ } -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ if (ret < 0) -+ goto cleanup; -+ if (args.data) -+ goto success; -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_lock2(process, adapter, &args, result); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+success: -+ DXG_TRACE("ioctl:%s %s %d", errorstr(ret), __func__, ret); -+ return ret; -+} -+ -+static int -+dxgkio_unlock2(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_unlock2 args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgallocation *alloc = NULL; -+ bool done = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ alloc = 
hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ args.allocation); -+ if (alloc == NULL) { -+ ret = -EINVAL; -+ } else { -+ if (alloc->cpu_address == NULL) { -+ DXG_ERR("Allocation is not locked: %p", alloc); -+ ret = -EINVAL; -+ } else if (alloc->cpu_address_mapped) { -+ if (alloc->cpu_address_refcount > 0) { -+ alloc->cpu_address_refcount--; -+ if (alloc->cpu_address_refcount != 0) { -+ done = true; -+ } else { -+ dxg_unmap_iospace(alloc->cpu_address, -+ alloc->num_pages << PAGE_SHIFT); -+ alloc->cpu_address_mapped = false; -+ alloc->cpu_address = NULL; -+ } -+ } else { -+ DXG_ERR("Invalid cpu access refcount"); -+ done = true; -+ } -+ } -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ if (done) -+ goto success; -+ if (ret < 0) -+ goto cleanup; -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_unlock2(process, adapter, &args); -+ -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+success: -+ DXG_TRACE("ioctl:%s %s %d", errorstr(ret), __func__, ret); -+ return ret; -+} -+ - static int - dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) - { -@@ -3909,7 +4065,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x22 */ {}, - /* 0x23 */ {}, - /* 0x24 */ {}, --/* 0x25 */ {}, -+/* 0x25 */ {dxgkio_lock2, LX_DXLOCK2}, - /* 0x26 */ {}, - /* 0x27 */ {}, - /* 0x28 */ {}, -@@ -3932,7 +4088,7 @@ static struct ioctl_desc ioctls[] = { - LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE}, - /* 0x36 */ {dxgkio_submit_wait_to_hwqueue, - 
LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE}, --/* 0x37 */ {}, -+/* 0x37 */ {dxgkio_unlock2, LX_DXUNLOCK2}, - /* 0x38 */ {}, - /* 0x39 */ {}, - /* 0x3a */ {dxgkio_wait_sync_object_cpu, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -668,6 +668,32 @@ struct d3dkmt_submitcommandtohwqueue { - #endif - }; - -+struct d3dddicb_lock2flags { -+ union { -+ struct { -+ __u32 reserved:32; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_lock2 { -+ struct d3dkmthandle device; -+ struct d3dkmthandle allocation; -+ struct d3dddicb_lock2flags flags; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ void *data; -+#else -+ __u64 data; -+#endif -+}; -+ -+struct d3dkmt_unlock2 { -+ struct d3dkmthandle device; -+ struct d3dkmthandle allocation; -+}; -+ - enum d3dkmt_standardallocationtype { - _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP = 1, - _D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER = 2, -@@ -1083,6 +1109,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ - _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) -+#define LX_DXLOCK2 \ -+ _IOWR(0x47, 0x25, struct d3dkmt_lock2) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x31, struct d3dkmt_signalsynchronizationobjectfromcpu) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU \ -@@ -1095,6 +1123,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x35, struct d3dkmt_submitsignalsyncobjectstohwqueue) - #define LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE \ - _IOWR(0x47, 0x36, struct d3dkmt_submitwaitforsyncobjectstohwqueue) -+#define LX_DXUNLOCK2 \ -+ _IOWR(0x47, 0x37, struct d3dkmt_unlock2) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x3a, struct d3dkmt_waitforsynchronizationobjectfromcpu) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \ --- -Armbian - diff --git 
a/patch/kernel/archive/wsl2-arm64-6.1/1686-drivers-hv-dxgkrnl-Manage-device-allocation-properties.patch b/patch/kernel/archive/wsl2-arm64-6.1/1686-drivers-hv-dxgkrnl-Manage-device-allocation-properties.patch deleted file mode 100644 index bd024641118d..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1686-drivers-hv-dxgkrnl-Manage-device-allocation-properties.patch +++ /dev/null @@ -1,912 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 19 Jan 2022 11:14:22 -0800 -Subject: drivers: hv: dxgkrnl: Manage device allocation properties - -Implement ioctls to manage properties of a compute device allocation: - - LX_DXUPDATEALLOCPROPERTY, - - LX_DXSETALLOCATIONPRIORITY, - - LX_DXGETALLOCATIONPRIORITY, - - LX_DXQUERYALLOCATIONRESIDENCY. - - LX_DXCHANGEVIDEOMEMORYRESERVATION, - -The LX_DXUPDATEALLOCPROPERTY ioctl requests the host to update -various properties of a compute devoce allocation. - -The LX_DXSETALLOCATIONPRIORITY and LX_DXGETALLOCATIONPRIORITY ioctls -are used to set/get allocation priority, which defines the -importance of the allocation to be in the local device memory. - -The LX_DXQUERYALLOCATIONRESIDENCY ioctl queries if the allocation -is located in the compute device accessible memory. - -The LX_DXCHANGEVIDEOMEMORYRESERVATION ioctl changes compute device -memory reservation of an allocation. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 21 + - drivers/hv/dxgkrnl/dxgvmbus.c | 300 ++++++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 50 ++ - drivers/hv/dxgkrnl/ioctl.c | 217 ++++++- - include/uapi/misc/d3dkmthk.h | 127 ++++ - 5 files changed, 708 insertions(+), 7 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -851,6 +851,23 @@ int dxgvmb_send_lock2(struct dxgprocess *process, - int dxgvmb_send_unlock2(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_unlock2 *args); -+int dxgvmb_send_update_alloc_property(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dddi_updateallocproperty *args, -+ struct d3dddi_updateallocproperty *__user -+ inargs); -+int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_setallocationpriority *a); -+int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_getallocationpriority *a); -+int dxgvmb_send_change_vidmem_reservation(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle other_process, -+ struct -+ d3dkmt_changevideomemoryreservation -+ *args); - int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_createhwqueue *args, -@@ -870,6 +887,10 @@ int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, - struct d3dkmt_opensyncobjectfromnthandle2 - *args, - struct dxgsyncobject *syncobj); -+int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryallocationresidency -+ *args); - int dxgvmb_send_get_device_state(struct dxgprocess *process, - struct dxgadapter *adapter, - struct 
d3dkmt_getdevicestate *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1829,6 +1829,79 @@ int dxgvmb_send_destroy_allocation(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryallocationresidency -+ *args) -+{ -+ int ret = -EINVAL; -+ struct dxgkvmb_command_queryallocationresidency *command = NULL; -+ u32 cmd_size = sizeof(*command); -+ u32 alloc_size = 0; -+ u32 result_allocation_size = 0; -+ struct dxgkvmb_command_queryallocationresidency_return *result = NULL; -+ u32 result_size = sizeof(*result); -+ struct dxgvmbusmsgres msg = {.hdr = NULL}; -+ -+ if (args->allocation_count > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args->allocation_count) { -+ alloc_size = args->allocation_count * -+ sizeof(struct d3dkmthandle); -+ cmd_size += alloc_size; -+ result_allocation_size = args->allocation_count * -+ sizeof(args->residency_status[0]); -+ } else { -+ result_allocation_size = sizeof(args->residency_status[0]); -+ } -+ result_size += result_allocation_size; -+ -+ ret = init_message_res(&msg, adapter, process, cmd_size, result_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ result = msg.res; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_QUERYALLOCATIONRESIDENCY, -+ process->host_handle); -+ command->args = *args; -+ if (alloc_size) { -+ ret = copy_from_user(&command[1], args->allocations, -+ alloc_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ result, msg.res_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result->status); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = 
copy_to_user(args->residency_status, &result[1], -+ result_allocation_size); -+ if (ret) { -+ DXG_ERR("failed to copy residency status"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ free_message((struct dxgvmbusmsg *)&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_get_device_state(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_getdevicestate *args, -@@ -2461,6 +2534,233 @@ int dxgvmb_send_unlock2(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_update_alloc_property(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dddi_updateallocproperty *args, -+ struct d3dddi_updateallocproperty *__user -+ inargs) -+{ -+ int ret; -+ int ret1; -+ struct dxgkvmb_command_updateallocationproperty *command; -+ struct dxgkvmb_command_updateallocationproperty_return result = { }; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_UPDATEALLOCATIONPROPERTY, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ -+ if (ret < 0) -+ goto cleanup; -+ ret = ntstatus2int(result.status); -+ /* STATUS_PENING is a success code > 0 */ -+ if (ret == STATUS_PENDING) { -+ ret1 = copy_to_user(&inargs->paging_fence_value, -+ &result.paging_fence_value, -+ sizeof(u64)); -+ if (ret1) { -+ DXG_ERR("failed to copy paging fence"); -+ ret = -EINVAL; -+ } -+ } -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_setallocationpriority *args) -+{ -+ u32 cmd_size = sizeof(struct dxgkvmb_command_setallocationpriority); -+ u32 alloc_size = 0; -+ u32 
priority_size = 0; -+ struct dxgkvmb_command_setallocationpriority *command; -+ int ret; -+ struct d3dkmthandle *allocations; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ if (args->allocation_count > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (args->resource.v) { -+ priority_size = sizeof(u32); -+ if (args->allocation_count != 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } else { -+ if (args->allocation_count == 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ alloc_size = args->allocation_count * -+ sizeof(struct d3dkmthandle); -+ cmd_size += alloc_size; -+ priority_size = sizeof(u32) * args->allocation_count; -+ } -+ cmd_size += priority_size; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_SETALLOCATIONPRIORITY, -+ process->host_handle); -+ command->device = args->device; -+ command->allocation_count = args->allocation_count; -+ command->resource = args->resource; -+ allocations = (struct d3dkmthandle *) &command[1]; -+ ret = copy_from_user(allocations, args->allocation_list, -+ alloc_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handle"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_from_user((u8 *) allocations + alloc_size, -+ args->priorities, priority_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc priority"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_getallocationpriority *args) -+{ -+ u32 cmd_size = sizeof(struct dxgkvmb_command_getallocationpriority); -+ u32 result_size; -+ u32 alloc_size = 0; -+ u32 priority_size = 0; -+ struct 
dxgkvmb_command_getallocationpriority *command; -+ struct dxgkvmb_command_getallocationpriority_return *result; -+ int ret; -+ struct d3dkmthandle *allocations; -+ struct dxgvmbusmsgres msg = {.hdr = NULL}; -+ -+ if (args->allocation_count > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (args->resource.v) { -+ priority_size = sizeof(u32); -+ if (args->allocation_count != 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } else { -+ if (args->allocation_count == 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ alloc_size = args->allocation_count * -+ sizeof(struct d3dkmthandle); -+ cmd_size += alloc_size; -+ priority_size = sizeof(u32) * args->allocation_count; -+ } -+ result_size = sizeof(*result) + priority_size; -+ -+ ret = init_message_res(&msg, adapter, process, cmd_size, result_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ result = msg.res; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_GETALLOCATIONPRIORITY, -+ process->host_handle); -+ command->device = args->device; -+ command->allocation_count = args->allocation_count; -+ command->resource = args->resource; -+ allocations = (struct d3dkmthandle *) &command[1]; -+ ret = copy_from_user(allocations, args->allocation_list, -+ alloc_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, -+ msg.size + msg.res_size, -+ result, msg.res_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result->status); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(args->priorities, -+ (u8 *) result + sizeof(*result), -+ priority_size); -+ if (ret) { -+ DXG_ERR("failed to copy priorities"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ free_message((struct dxgvmbusmsg *)&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_change_vidmem_reservation(struct dxgprocess *process, -+ struct dxgadapter 
*adapter, -+ struct d3dkmthandle other_process, -+ struct -+ d3dkmt_changevideomemoryreservation -+ *args) -+{ -+ struct dxgkvmb_command_changevideomemoryreservation *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CHANGEVIDEOMEMORYRESERVATION, -+ process->host_handle); -+ command->args = *args; -+ command->args.process = other_process.v; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_createhwqueue *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -308,6 +308,29 @@ struct dxgkvmb_command_queryadapterinfo_return { - u8 private_data[1]; - }; - -+/* Returns ntstatus */ -+struct dxgkvmb_command_setallocationpriority { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+ u32 allocation_count; -+ /* struct d3dkmthandle allocations[allocation_count or 0]; */ -+ /* u32 priorities[allocation_count or 1]; */ -+}; -+ -+struct dxgkvmb_command_getallocationpriority { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+ u32 allocation_count; -+ /* struct d3dkmthandle allocations[allocation_count or 0]; */ -+}; -+ -+struct dxgkvmb_command_getallocationpriority_return { -+ struct ntstatus status; -+ /* u32 priorities[allocation_count or 1]; */ -+}; -+ - struct dxgkvmb_command_createdevice { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_createdeviceflags flags; 
-@@ -589,6 +612,22 @@ struct dxgkvmb_command_unlock2 { - bool use_legacy_unlock; - }; - -+struct dxgkvmb_command_updateallocationproperty { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dddi_updateallocproperty args; -+}; -+ -+struct dxgkvmb_command_updateallocationproperty_return { -+ u64 paging_fence_value; -+ struct ntstatus status; -+}; -+ -+/* Returns ntstatus */ -+struct dxgkvmb_command_changevideomemoryreservation { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_changevideomemoryreservation args; -+}; -+ - /* Returns the same structure */ - struct dxgkvmb_command_createhwqueue { - struct dxgkvmb_command_vgpu_to_host hdr; -@@ -609,6 +648,17 @@ struct dxgkvmb_command_destroyhwqueue { - struct d3dkmthandle hwqueue; - }; - -+struct dxgkvmb_command_queryallocationresidency { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_queryallocationresidency args; -+ /* struct d3dkmthandle allocations[0 or number of allocations] */ -+}; -+ -+struct dxgkvmb_command_queryallocationresidency_return { -+ struct ntstatus status; -+ /* d3dkmt_allocationresidencystatus[NumAllocations] */ -+}; -+ - struct dxgkvmb_command_getdevicestate { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_getdevicestate args; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3214,7 +3214,7 @@ dxgkio_lock2(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - - success: -- DXG_TRACE("ioctl:%s %s %d", errorstr(ret), __func__, ret); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); - return ret; - } - -@@ -3294,7 +3294,209 @@ dxgkio_unlock2(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - - success: -- DXG_TRACE("ioctl:%s %s %d", errorstr(ret), __func__, ret); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; 
-+} -+ -+static int -+dxgkio_update_alloc_property(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dddi_updateallocproperty args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.paging_queue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_update_alloc_property(process, adapter, -+ &args, inargs); -+ -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_query_alloc_residency(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_queryallocationresidency args; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if ((args.allocation_count == 0) == (args.resource.v == 0)) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ ret = dxgvmb_send_query_alloc_residency(process, adapter, &args); -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ 
DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_set_allocation_priority(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_setallocationpriority args; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ ret = dxgvmb_send_set_allocation_priority(process, adapter, &args); -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_get_allocation_priority(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_getallocationpriority args; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ ret = dxgvmb_send_get_allocation_priority(process, adapter, &args); -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_change_vidmem_reservation(struct dxgprocess 
*process, void *__user inargs) -+{ -+ struct d3dkmt_changevideomemoryreservation args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ bool adapter_locked = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.process != 0) { -+ DXG_ERR("setting memory reservation for other process"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ adapter_locked = true; -+ args.adapter.v = 0; -+ ret = dxgvmb_send_change_vidmem_reservation(process, adapter, -+ zerohandle, &args); -+ -+cleanup: -+ -+ if (adapter_locked) -+ dxgadapter_release_lock_shared(adapter); -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); - return ret; - } - -@@ -4050,7 +4252,8 @@ static struct ioctl_desc ioctls[] = { - /* 0x13 */ {dxgkio_destroy_allocation, LX_DXDESTROYALLOCATION2}, - /* 0x14 */ {dxgkio_enum_adapters, LX_DXENUMADAPTERS2}, - /* 0x15 */ {dxgkio_close_adapter, LX_DXCLOSEADAPTER}, --/* 0x16 */ {}, -+/* 0x16 */ {dxgkio_change_vidmem_reservation, -+ LX_DXCHANGEVIDEOMEMORYRESERVATION}, - /* 0x17 */ {}, - /* 0x18 */ {dxgkio_create_hwqueue, LX_DXCREATEHWQUEUE}, - /* 0x19 */ {dxgkio_destroy_device, LX_DXDESTROYDEVICE}, -@@ -4070,11 +4273,11 @@ static struct ioctl_desc ioctls[] = { - /* 0x27 */ {}, - /* 0x28 */ {}, - /* 0x29 */ {}, --/* 0x2a */ {}, -+/* 0x2a */ {dxgkio_query_alloc_residency, LX_DXQUERYALLOCATIONRESIDENCY}, - /* 0x2b */ {}, - /* 0x2c */ {}, - /* 0x2d */ {}, --/* 0x2e */ {}, -+/* 0x2e */ {dxgkio_set_allocation_priority, LX_DXSETALLOCATIONPRIORITY}, - /* 0x2f */ {}, - /* 0x30 */ {}, - /* 0x31 */ {dxgkio_signal_sync_object_cpu, -@@ -4089,13 +4292,13 @@ 
static struct ioctl_desc ioctls[] = { - /* 0x36 */ {dxgkio_submit_wait_to_hwqueue, - LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE}, - /* 0x37 */ {dxgkio_unlock2, LX_DXUNLOCK2}, --/* 0x38 */ {}, -+/* 0x38 */ {dxgkio_update_alloc_property, LX_DXUPDATEALLOCPROPERTY}, - /* 0x39 */ {}, - /* 0x3a */ {dxgkio_wait_sync_object_cpu, - LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU}, - /* 0x3b */ {dxgkio_wait_sync_object_gpu, - LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU}, --/* 0x3c */ {}, -+/* 0x3c */ {dxgkio_get_allocation_priority, LX_DXGETALLOCATIONPRIORITY}, - /* 0x3d */ {}, - /* 0x3e */ {dxgkio_enum_adapters3, LX_DXENUMADAPTERS3}, - /* 0x3f */ {dxgkio_share_objects, LX_DXSHAREOBJECTS}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -668,6 +668,63 @@ struct d3dkmt_submitcommandtohwqueue { - #endif - }; - -+struct d3dkmt_setallocationpriority { -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+#ifdef __KERNEL__ -+ const struct d3dkmthandle *allocation_list; -+#else -+ __u64 allocation_list; -+#endif -+ __u32 allocation_count; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ const __u32 *priorities; -+#else -+ __u64 priorities; -+#endif -+}; -+ -+struct d3dkmt_getallocationpriority { -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+#ifdef __KERNEL__ -+ const struct d3dkmthandle *allocation_list; -+#else -+ __u64 allocation_list; -+#endif -+ __u32 allocation_count; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ __u32 *priorities; -+#else -+ __u64 priorities; -+#endif -+}; -+ -+enum d3dkmt_allocationresidencystatus { -+ _D3DKMT_ALLOCATIONRESIDENCYSTATUS_RESIDENTINGPUMEMORY = 1, -+ _D3DKMT_ALLOCATIONRESIDENCYSTATUS_RESIDENTINSHAREDMEMORY = 2, -+ _D3DKMT_ALLOCATIONRESIDENCYSTATUS_NOTRESIDENT = 3, -+}; -+ -+struct d3dkmt_queryallocationresidency { -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+#ifdef __KERNEL__ 
-+ struct d3dkmthandle *allocations; -+#else -+ __u64 allocations; -+#endif -+ __u32 allocation_count; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ enum d3dkmt_allocationresidencystatus *residency_status; -+#else -+ __u64 residency_status; -+#endif -+}; -+ - struct d3dddicb_lock2flags { - union { - struct { -@@ -835,6 +892,11 @@ struct d3dkmt_destroyallocation2 { - struct d3dddicb_destroyallocation2flags flags; - }; - -+enum d3dkmt_memory_segment_group { -+ _D3DKMT_MEMORY_SEGMENT_GROUP_LOCAL = 0, -+ _D3DKMT_MEMORY_SEGMENT_GROUP_NON_LOCAL = 1 -+}; -+ - struct d3dkmt_adaptertype { - union { - struct { -@@ -886,6 +948,61 @@ struct d3dddi_openallocationinfo2 { - __u64 reserved[6]; - }; - -+struct d3dddi_updateallocproperty_flags { -+ union { -+ struct { -+ __u32 accessed_physically:1; -+ __u32 reserved:31; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dddi_segmentpreference { -+ union { -+ struct { -+ __u32 segment_id0:5; -+ __u32 direction0:1; -+ __u32 segment_id1:5; -+ __u32 direction1:1; -+ __u32 segment_id2:5; -+ __u32 direction2:1; -+ __u32 segment_id3:5; -+ __u32 direction3:1; -+ __u32 segment_id4:5; -+ __u32 direction4:1; -+ __u32 reserved:2; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dddi_updateallocproperty { -+ struct d3dkmthandle paging_queue; -+ struct d3dkmthandle allocation; -+ __u32 supported_segment_set; -+ struct d3dddi_segmentpreference preferred_segment; -+ struct d3dddi_updateallocproperty_flags flags; -+ __u64 paging_fence_value; -+ union { -+ struct { -+ __u32 set_accessed_physically:1; -+ __u32 set_supported_segmentSet:1; -+ __u32 set_preferred_segment:1; -+ __u32 reserved:29; -+ }; -+ __u32 property_mask_value; -+ }; -+}; -+ -+struct d3dkmt_changevideomemoryreservation { -+ __u64 process; -+ struct d3dkmthandle adapter; -+ enum d3dkmt_memory_segment_group memory_segment_group; -+ __u64 reservation; -+ __u32 physical_adapter_index; -+}; -+ - struct d3dkmt_createhwqueue { - struct d3dkmthandle context; - struct d3dddi_createhwqueueflags flags; 
-@@ -1099,6 +1216,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x14, struct d3dkmt_enumadapters2) - #define LX_DXCLOSEADAPTER \ - _IOWR(0x47, 0x15, struct d3dkmt_closeadapter) -+#define LX_DXCHANGEVIDEOMEMORYRESERVATION \ -+ _IOWR(0x47, 0x16, struct d3dkmt_changevideomemoryreservation) - #define LX_DXCREATEHWQUEUE \ - _IOWR(0x47, 0x18, struct d3dkmt_createhwqueue) - #define LX_DXDESTROYHWQUEUE \ -@@ -1111,6 +1230,10 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) - #define LX_DXLOCK2 \ - _IOWR(0x47, 0x25, struct d3dkmt_lock2) -+#define LX_DXQUERYALLOCATIONRESIDENCY \ -+ _IOWR(0x47, 0x2a, struct d3dkmt_queryallocationresidency) -+#define LX_DXSETALLOCATIONPRIORITY \ -+ _IOWR(0x47, 0x2e, struct d3dkmt_setallocationpriority) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x31, struct d3dkmt_signalsynchronizationobjectfromcpu) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU \ -@@ -1125,10 +1248,14 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x36, struct d3dkmt_submitwaitforsyncobjectstohwqueue) - #define LX_DXUNLOCK2 \ - _IOWR(0x47, 0x37, struct d3dkmt_unlock2) -+#define LX_DXUPDATEALLOCPROPERTY \ -+ _IOWR(0x47, 0x38, struct d3dddi_updateallocproperty) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x3a, struct d3dkmt_waitforsynchronizationobjectfromcpu) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \ - _IOWR(0x47, 0x3b, struct d3dkmt_waitforsynchronizationobjectfromgpu) -+#define LX_DXGETALLOCATIONPRIORITY \ -+ _IOWR(0x47, 0x3c, struct d3dkmt_getallocationpriority) - #define LX_DXENUMADAPTERS3 \ - _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) - #define LX_DXSHAREOBJECTS \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1687-drivers-hv-dxgkrnl-Flush-heap-transitions.patch b/patch/kernel/archive/wsl2-arm64-6.1/1687-drivers-hv-dxgkrnl-Flush-heap-transitions.patch deleted file mode 100644 index 1dbb3e8773f3..000000000000 --- 
a/patch/kernel/archive/wsl2-arm64-6.1/1687-drivers-hv-dxgkrnl-Flush-heap-transitions.patch +++ /dev/null @@ -1,194 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 18 Jan 2022 17:25:37 -0800 -Subject: drivers: hv: dxgkrnl: Flush heap transitions - -Implement the ioctl to flush heap transitions -(LX_DXFLUSHHEAPTRANSITIONS). - -The ioctl is used to ensure that the video memory manager on the host -flushes all internal operations. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 2 +- - drivers/hv/dxgkrnl/dxgkrnl.h | 3 + - drivers/hv/dxgkrnl/dxgvmbus.c | 23 +++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 5 + - drivers/hv/dxgkrnl/ioctl.c | 49 +++++++++- - include/uapi/misc/d3dkmthk.h | 6 ++ - 6 files changed, 86 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -942,7 +942,7 @@ else - if (alloc->priv_drv_data) - vfree(alloc->priv_drv_data); - if (alloc->cpu_address_mapped) -- pr_err("Alloc IO space is mapped: %p", alloc); -+ DXG_ERR("Alloc IO space is mapped: %p", alloc); - kfree(alloc); - } - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -882,6 +882,9 @@ int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_submitcommandtohwqueue *a); -+int dxgvmb_send_flush_heap_transitions(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_flushheaptransitions *arg); - int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, - struct dxgvmbuschannel *channel, - struct 
d3dkmt_opensyncobjectfromnthandle2 -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1829,6 +1829,29 @@ int dxgvmb_send_destroy_allocation(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_flush_heap_transitions(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_flushheaptransitions *args) -+{ -+ struct dxgkvmb_command_flushheaptransitions *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_FLUSHHEAPTRANSITIONS, -+ process->host_handle); -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryallocationresidency -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -367,6 +367,11 @@ struct dxgkvmb_command_submitcommandtohwqueue { - /* PrivateDriverData */ - }; - -+/* Returns ntstatus */ -+struct dxgkvmb_command_flushheaptransitions { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+}; -+ - struct dxgkvmb_command_createallocation_allocinfo { - u32 flags; - u32 priv_drv_data_size; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3500,6 +3500,53 @@ dxgkio_change_vidmem_reservation(struct dxgprocess *process, void *__user inargs - return ret; - } - -+static int 
-+dxgkio_flush_heap_transitions(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_flushheaptransitions args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ bool adapter_locked = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ adapter_locked = true; -+ -+ args.adapter = adapter->host_handle; -+ ret = dxgvmb_send_flush_heap_transitions(process, adapter, &args); -+ if (ret < 0) -+ goto cleanup; -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (adapter_locked) -+ dxgadapter_release_lock_shared(adapter); -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ return ret; -+} -+ - static int - dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) - { -@@ -4262,7 +4309,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x1c */ {dxgkio_destroy_paging_queue, LX_DXDESTROYPAGINGQUEUE}, - /* 0x1d */ {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT}, - /* 0x1e */ {}, --/* 0x1f */ {}, -+/* 0x1f */ {dxgkio_flush_heap_transitions, LX_DXFLUSHHEAPTRANSITIONS}, - /* 0x20 */ {}, - /* 0x21 */ {}, - /* 0x22 */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -936,6 +936,10 @@ struct d3dkmt_queryadapterinfo { - __u32 private_data_size; - }; - -+struct d3dkmt_flushheaptransitions { -+ struct d3dkmthandle adapter; -+}; -+ - struct d3dddi_openallocationinfo2 { - struct d3dkmthandle allocation; - #ifdef __KERNEL__ 
-@@ -1228,6 +1232,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ - _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) -+#define LX_DXFLUSHHEAPTRANSITIONS \ -+ _IOWR(0x47, 0x1f, struct d3dkmt_flushheaptransitions) - #define LX_DXLOCK2 \ - _IOWR(0x47, 0x25, struct d3dkmt_lock2) - #define LX_DXQUERYALLOCATIONRESIDENCY \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1688-drivers-hv-dxgkrnl-Query-video-memory-information.patch b/patch/kernel/archive/wsl2-arm64-6.1/1688-drivers-hv-dxgkrnl-Query-video-memory-information.patch deleted file mode 100644 index beff76cde6d8..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1688-drivers-hv-dxgkrnl-Query-video-memory-information.patch +++ /dev/null @@ -1,237 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 8 Feb 2022 18:34:07 -0800 -Subject: drivers: hv: dxgkrnl: Query video memory information - -Implement the ioctl to query video memory information from the host -(LX_DXQUERYVIDEOMEMORYINFO). 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 5 + - drivers/hv/dxgkrnl/dxgvmbus.c | 64 ++++++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 14 ++ - drivers/hv/dxgkrnl/ioctl.c | 50 +++++++- - include/uapi/misc/d3dkmthk.h | 13 ++ - 5 files changed, 145 insertions(+), 1 deletion(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -894,6 +894,11 @@ int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryallocationresidency - *args); -+int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryvideomemoryinfo *args, -+ struct d3dkmt_queryvideomemoryinfo -+ *__user iargs); - int dxgvmb_send_get_device_state(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_getdevicestate *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1925,6 +1925,70 @@ int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryvideomemoryinfo *args, -+ struct d3dkmt_queryvideomemoryinfo *__user -+ output) -+{ -+ int ret; -+ struct dxgkvmb_command_queryvideomemoryinfo *command; -+ struct dxgkvmb_command_queryvideomemoryinfo_return result = { }; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ command_vgpu_to_host_init2(&command->hdr, -+ dxgk_vmbcommand_queryvideomemoryinfo, -+ process->host_handle); -+ command->adapter = args->adapter; 
-+ command->memory_segment_group = args->memory_segment_group; -+ command->physical_adapter_index = args->physical_adapter_index; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(&output->budget, &result.budget, -+ sizeof(output->budget)); -+ if (ret) { -+ pr_err("%s failed to copy budget", __func__); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_to_user(&output->current_usage, &result.current_usage, -+ sizeof(output->current_usage)); -+ if (ret) { -+ pr_err("%s failed to copy current usage", __func__); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_to_user(&output->current_reservation, -+ &result.current_reservation, -+ sizeof(output->current_reservation)); -+ if (ret) { -+ pr_err("%s failed to copy reservation", __func__); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_to_user(&output->available_for_reservation, -+ &result.available_for_reservation, -+ sizeof(output->available_for_reservation)); -+ if (ret) { -+ pr_err("%s failed to copy avail reservation", __func__); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ dev_dbg(DXGDEV, "err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_get_device_state(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_getdevicestate *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -664,6 +664,20 @@ struct dxgkvmb_command_queryallocationresidency_return { - /* d3dkmt_allocationresidencystatus[NumAllocations] */ - }; - -+struct dxgkvmb_command_queryvideomemoryinfo { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle adapter; -+ enum d3dkmt_memory_segment_group memory_segment_group; -+ u32 physical_adapter_index; -+}; -+ -+struct dxgkvmb_command_queryvideomemoryinfo_return { -+ u64 
budget; -+ u64 current_usage; -+ u64 current_reservation; -+ u64 available_for_reservation; -+}; -+ - struct dxgkvmb_command_getdevicestate { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_getdevicestate args; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3547,6 +3547,54 @@ dxgkio_flush_heap_transitions(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_query_vidmem_info(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_queryvideomemoryinfo args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ bool adapter_locked = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.process != 0) { -+ DXG_ERR("query vidmem info from another process"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ adapter_locked = true; -+ -+ args.adapter = adapter->host_handle; -+ ret = dxgvmb_send_query_vidmem_info(process, adapter, &args, inargs); -+ -+cleanup: -+ -+ if (adapter_locked) -+ dxgadapter_release_lock_shared(adapter); -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ if (ret < 0) -+ DXG_ERR("failed: %x", ret); -+ return ret; -+} -+ - static int - dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) - { -@@ -4287,7 +4335,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x07 */ {dxgkio_create_paging_queue, LX_DXCREATEPAGINGQUEUE}, - /* 0x08 */ {}, - /* 0x09 */ {dxgkio_query_adapter_info, LX_DXQUERYADAPTERINFO}, --/* 0x0a */ {}, -+/* 0x0a */ {dxgkio_query_vidmem_info, 
LX_DXQUERYVIDEOMEMORYINFO}, - /* 0x0b */ {}, - /* 0x0c */ {}, - /* 0x0d */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -897,6 +897,17 @@ enum d3dkmt_memory_segment_group { - _D3DKMT_MEMORY_SEGMENT_GROUP_NON_LOCAL = 1 - }; - -+struct d3dkmt_queryvideomemoryinfo { -+ __u64 process; -+ struct d3dkmthandle adapter; -+ enum d3dkmt_memory_segment_group memory_segment_group; -+ __u64 budget; -+ __u64 current_usage; -+ __u64 current_reservation; -+ __u64 available_for_reservation; -+ __u32 physical_adapter_index; -+}; -+ - struct d3dkmt_adaptertype { - union { - struct { -@@ -1204,6 +1215,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x07, struct d3dkmt_createpagingqueue) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) -+#define LX_DXQUERYVIDEOMEMORYINFO \ -+ _IOWR(0x47, 0x0a, struct d3dkmt_queryvideomemoryinfo) - #define LX_DXGETDEVICESTATE \ - _IOWR(0x47, 0x0e, struct d3dkmt_getdevicestate) - #define LX_DXSUBMITCOMMAND \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1689-drivers-hv-dxgkrnl-The-escape-ioctl.patch b/patch/kernel/archive/wsl2-arm64-6.1/1689-drivers-hv-dxgkrnl-The-escape-ioctl.patch deleted file mode 100644 index ecb89843272b..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1689-drivers-hv-dxgkrnl-The-escape-ioctl.patch +++ /dev/null @@ -1,305 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 18 Jan 2022 15:50:30 -0800 -Subject: drivers: hv: dxgkrnl: The escape ioctl - -Implement the escape ioctl (LX_DXESCAPE). - -This ioctl is used to send/receive private data between user mode -compute device driver (guest) and kernel mode compute device -driver (host). It allows the user mode driver to extend the virtual -compute device API. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 3 + - drivers/hv/dxgkrnl/dxgvmbus.c | 75 +++++++++- - drivers/hv/dxgkrnl/dxgvmbus.h | 12 ++ - drivers/hv/dxgkrnl/ioctl.c | 42 +++++- - include/uapi/misc/d3dkmthk.h | 41 +++++ - 5 files changed, 167 insertions(+), 6 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -894,6 +894,9 @@ int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryallocationresidency - *args); -+int dxgvmb_send_escape(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_escape *args); - int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryvideomemoryinfo *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1925,6 +1925,70 @@ int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_escape(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_escape *args) -+{ -+ int ret; -+ struct dxgkvmb_command_escape *command = NULL; -+ u32 cmd_size = sizeof(*command); -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ if (args->priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ cmd_size = cmd_size - sizeof(args->priv_drv_data[0]) + -+ args->priv_drv_data_size; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_ESCAPE, -+ process->host_handle); -+ command->adapter = args->adapter; -+ command->device 
= args->device; -+ command->type = args->type; -+ command->flags = args->flags; -+ command->priv_drv_data_size = args->priv_drv_data_size; -+ command->context = args->context; -+ if (args->priv_drv_data_size) { -+ ret = copy_from_user(command->priv_drv_data, -+ args->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy priv data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ command->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ if (args->priv_drv_data_size) { -+ ret = copy_to_user(args->priv_drv_data, -+ command->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy priv data"); -+ ret = -EINVAL; -+ } -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryvideomemoryinfo *args, -@@ -1955,14 +2019,14 @@ int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - ret = copy_to_user(&output->budget, &result.budget, - sizeof(output->budget)); - if (ret) { -- pr_err("%s failed to copy budget", __func__); -+ DXG_ERR("failed to copy budget"); - ret = -EINVAL; - goto cleanup; - } - ret = copy_to_user(&output->current_usage, &result.current_usage, - sizeof(output->current_usage)); - if (ret) { -- pr_err("%s failed to copy current usage", __func__); -+ DXG_ERR("failed to copy current usage"); - ret = -EINVAL; - goto cleanup; - } -@@ -1970,7 +2034,7 @@ int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - &result.current_reservation, - sizeof(output->current_reservation)); - if (ret) { -- pr_err("%s failed to copy reservation", __func__); -+ DXG_ERR("failed to copy reservation"); - ret = -EINVAL; - goto cleanup; - } -@@ -1978,14 +2042,14 @@ int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - 
&result.available_for_reservation, - sizeof(output->available_for_reservation)); - if (ret) { -- pr_err("%s failed to copy avail reservation", __func__); -+ DXG_ERR("failed to copy avail reservation"); - ret = -EINVAL; - } - - cleanup: - free_message(&msg, process); - if (ret) -- dev_dbg(DXGDEV, "err: %d", ret); -+ DXG_TRACE("err: %d", ret); - return ret; - } - -@@ -3152,3 +3216,4 @@ int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, - DXG_TRACE("err: %d", ret); - return ret; - } -+ -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -664,6 +664,18 @@ struct dxgkvmb_command_queryallocationresidency_return { - /* d3dkmt_allocationresidencystatus[NumAllocations] */ - }; - -+/* Returns only private data */ -+struct dxgkvmb_command_escape { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle adapter; -+ struct d3dkmthandle device; -+ enum d3dkmt_escapetype type; -+ struct d3dddi_escapeflags flags; -+ u32 priv_drv_data_size; -+ struct d3dkmthandle context; -+ u8 priv_drv_data[1]; -+}; -+ - struct dxgkvmb_command_queryvideomemoryinfo { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmthandle adapter; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3547,6 +3547,46 @@ dxgkio_flush_heap_transitions(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_escape(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_escape args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ bool adapter_locked = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ 
ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ adapter_locked = true; -+ -+ args.adapter = adapter->host_handle; -+ ret = dxgvmb_send_escape(process, adapter, &args); -+ -+cleanup: -+ -+ if (adapter_locked) -+ dxgadapter_release_lock_shared(adapter); -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_query_vidmem_info(struct dxgprocess *process, void *__user inargs) - { -@@ -4338,7 +4378,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x0a */ {dxgkio_query_vidmem_info, LX_DXQUERYVIDEOMEMORYINFO}, - /* 0x0b */ {}, - /* 0x0c */ {}, --/* 0x0d */ {}, -+/* 0x0d */ {dxgkio_escape, LX_DXESCAPE}, - /* 0x0e */ {dxgkio_get_device_state, LX_DXGETDEVICESTATE}, - /* 0x0f */ {dxgkio_submit_command, LX_DXSUBMITCOMMAND}, - /* 0x10 */ {dxgkio_create_sync_object, LX_DXCREATESYNCHRONIZATIONOBJECT}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -236,6 +236,45 @@ struct d3dddi_destroypagingqueue { - struct d3dkmthandle paging_queue; - }; - -+enum d3dkmt_escapetype { -+ _D3DKMT_ESCAPE_DRIVERPRIVATE = 0, -+ _D3DKMT_ESCAPE_VIDMM = 1, -+ _D3DKMT_ESCAPE_VIDSCH = 3, -+ _D3DKMT_ESCAPE_DEVICE = 4, -+ _D3DKMT_ESCAPE_DRT_TEST = 8, -+}; -+ -+struct d3dddi_escapeflags { -+ union { -+ struct { -+ __u32 hardware_access:1; -+ __u32 device_status_query:1; -+ __u32 change_frame_latency:1; -+ __u32 no_adapter_synchronization:1; -+ __u32 reserved:1; -+ __u32 virtual_machine_data:1; -+ __u32 driver_known_escape:1; -+ __u32 driver_common_escape:1; -+ __u32 reserved2:24; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_escape { -+ struct d3dkmthandle adapter; -+ struct d3dkmthandle device; -+ enum d3dkmt_escapetype type; -+ struct 
d3dddi_escapeflags flags; -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ __u32 priv_drv_data_size; -+ struct d3dkmthandle context; -+}; -+ - enum dxgk_render_pipeline_stage { - _DXGK_RENDER_PIPELINE_STAGE_UNKNOWN = 0, - _DXGK_RENDER_PIPELINE_STAGE_INPUT_ASSEMBLER = 1, -@@ -1217,6 +1256,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) - #define LX_DXQUERYVIDEOMEMORYINFO \ - _IOWR(0x47, 0x0a, struct d3dkmt_queryvideomemoryinfo) -+#define LX_DXESCAPE \ -+ _IOWR(0x47, 0x0d, struct d3dkmt_escape) - #define LX_DXGETDEVICESTATE \ - _IOWR(0x47, 0x0e, struct d3dkmt_getdevicestate) - #define LX_DXSUBMITCOMMAND \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1690-drivers-hv-dxgkrnl-Ioctl-to-put-device-to-error-state.patch b/patch/kernel/archive/wsl2-arm64-6.1/1690-drivers-hv-dxgkrnl-Ioctl-to-put-device-to-error-state.patch deleted file mode 100644 index 89911a1cfc92..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1690-drivers-hv-dxgkrnl-Ioctl-to-put-device-to-error-state.patch +++ /dev/null @@ -1,180 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 9 Feb 2022 10:57:57 -0800 -Subject: drivers: hv: dxgkrnl: Ioctl to put device to error state - -Implement the ioctl to put the virtual compute device to the error -state (LX_DXMARKDEVICEASERROR). - -This ioctl is used by the user mode driver when it detects an -unrecoverable error condition. - -When a compute device is put to the error state, all subsequent -ioctl calls to the device will fail. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 3 + - drivers/hv/dxgkrnl/dxgvmbus.c | 25 ++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 5 ++ - drivers/hv/dxgkrnl/ioctl.c | 38 +++++++++- - include/uapi/misc/d3dkmthk.h | 12 +++ - 5 files changed, 82 insertions(+), 1 deletion(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -856,6 +856,9 @@ int dxgvmb_send_update_alloc_property(struct dxgprocess *process, - struct d3dddi_updateallocproperty *args, - struct d3dddi_updateallocproperty *__user - inargs); -+int dxgvmb_send_mark_device_as_error(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_markdeviceaserror *args); - int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_setallocationpriority *a); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -2730,6 +2730,31 @@ int dxgvmb_send_update_alloc_property(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_mark_device_as_error(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_markdeviceaserror *args) -+{ -+ struct dxgkvmb_command_markdeviceaserror *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_MARKDEVICEASERROR, -+ process->host_handle); -+ command->args = *args; -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return 
ret; -+} -+ - int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_setallocationpriority *args) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -627,6 +627,11 @@ struct dxgkvmb_command_updateallocationproperty_return { - struct ntstatus status; - }; - -+struct dxgkvmb_command_markdeviceaserror { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_markdeviceaserror args; -+}; -+ - /* Returns ntstatus */ - struct dxgkvmb_command_changevideomemoryreservation { - struct dxgkvmb_command_vgpu_to_host hdr; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3341,6 +3341,42 @@ dxgkio_update_alloc_property(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_mark_device_as_error(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_markdeviceaserror args; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ device->execution_state = _D3DKMT_DEVICEEXECUTION_RESET; -+ ret = dxgvmb_send_mark_device_as_error(process, adapter, &args); -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; 
-+} -+ - static int - dxgkio_query_alloc_residency(struct dxgprocess *process, void *__user inargs) - { -@@ -4404,7 +4440,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x23 */ {}, - /* 0x24 */ {}, - /* 0x25 */ {dxgkio_lock2, LX_DXLOCK2}, --/* 0x26 */ {}, -+/* 0x26 */ {dxgkio_mark_device_as_error, LX_DXMARKDEVICEASERROR}, - /* 0x27 */ {}, - /* 0x28 */ {}, - /* 0x29 */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -790,6 +790,16 @@ struct d3dkmt_unlock2 { - struct d3dkmthandle allocation; - }; - -+enum d3dkmt_device_error_reason { -+ _D3DKMT_DEVICE_ERROR_REASON_GENERIC = 0x80000000, -+ _D3DKMT_DEVICE_ERROR_REASON_DRIVER_ERROR = 0x80000006, -+}; -+ -+struct d3dkmt_markdeviceaserror { -+ struct d3dkmthandle device; -+ enum d3dkmt_device_error_reason reason; -+}; -+ - enum d3dkmt_standardallocationtype { - _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP = 1, - _D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER = 2, -@@ -1290,6 +1300,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x1f, struct d3dkmt_flushheaptransitions) - #define LX_DXLOCK2 \ - _IOWR(0x47, 0x25, struct d3dkmt_lock2) -+#define LX_DXMARKDEVICEASERROR \ -+ _IOWR(0x47, 0x26, struct d3dkmt_markdeviceaserror) - #define LX_DXQUERYALLOCATIONRESIDENCY \ - _IOWR(0x47, 0x2a, struct d3dkmt_queryallocationresidency) - #define LX_DXSETALLOCATIONPRIORITY \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1691-drivers-hv-dxgkrnl-Ioctls-to-query-statistics-and-clock-calibration.patch b/patch/kernel/archive/wsl2-arm64-6.1/1691-drivers-hv-dxgkrnl-Ioctls-to-query-statistics-and-clock-calibration.patch deleted file mode 100644 index 61dc6cd5c752..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1691-drivers-hv-dxgkrnl-Ioctls-to-query-statistics-and-clock-calibration.patch +++ /dev/null @@ -1,423 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 
17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 9 Feb 2022 11:01:57 -0800 -Subject: drivers: hv: dxgkrnl: Ioctls to query statistics and clock - calibration - -Implement ioctls to query statistics from the VGPU device -(LX_DXQUERYSTATISTICS) and to query clock calibration -(LX_DXQUERYCLOCKCALIBRATION). - -The LX_DXQUERYSTATISTICS ioctl is used to query various statistics from -the compute device on the host. - -The LX_DXQUERYCLOCKCALIBRATION ioctl queries the compute device clock -and is used for performance monitoring. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 8 + - drivers/hv/dxgkrnl/dxgvmbus.c | 77 +++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 21 ++ - drivers/hv/dxgkrnl/ioctl.c | 111 +++++++++- - include/uapi/misc/d3dkmthk.h | 62 ++++++ - 5 files changed, 277 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -885,6 +885,11 @@ int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_submitcommandtohwqueue *a); -+int dxgvmb_send_query_clock_calibration(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryclockcalibration *a, -+ struct d3dkmt_queryclockcalibration -+ *__user inargs); - int dxgvmb_send_flush_heap_transitions(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_flushheaptransitions *arg); -@@ -929,6 +934,9 @@ int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - void *prive_alloc_data, - u32 *res_priv_data_size, - void *priv_res_data); -+int dxgvmb_send_query_statistics(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_querystatistics *args); - int dxgvmb_send_async_msg(struct dxgvmbuschannel 
*channel, - void *command, - u32 cmd_size); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1829,6 +1829,48 @@ int dxgvmb_send_destroy_allocation(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_query_clock_calibration(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryclockcalibration -+ *args, -+ struct d3dkmt_queryclockcalibration -+ *__user inargs) -+{ -+ struct dxgkvmb_command_queryclockcalibration *command; -+ struct dxgkvmb_command_queryclockcalibration_return result; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_QUERYCLOCKCALIBRATION, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) -+ goto cleanup; -+ ret = copy_to_user(&inargs->clock_data, &result.clock_data, -+ sizeof(result.clock_data)); -+ if (ret) { -+ pr_err("%s failed to copy clock data", __func__); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = ntstatus2int(result.status); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_flush_heap_transitions(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_flushheaptransitions *args) -@@ -3242,3 +3284,38 @@ int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_query_statistics(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_querystatistics *args) -+{ -+ struct dxgkvmb_command_querystatistics *command; -+ struct dxgkvmb_command_querystatistics_return *result; -+ int ret; -+ struct 
dxgvmbusmsgres msg = {.hdr = NULL}; -+ -+ ret = init_message_res(&msg, adapter, process, sizeof(*command), -+ sizeof(*result)); -+ if (ret) -+ goto cleanup; -+ command = msg.msg; -+ result = msg.res; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_QUERYSTATISTICS, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ result, msg.res_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ args->result = result->result; -+ ret = ntstatus2int(result->status); -+ -+cleanup: -+ free_message((struct dxgvmbusmsg *)&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -372,6 +372,16 @@ struct dxgkvmb_command_flushheaptransitions { - struct dxgkvmb_command_vgpu_to_host hdr; - }; - -+struct dxgkvmb_command_queryclockcalibration { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_queryclockcalibration args; -+}; -+ -+struct dxgkvmb_command_queryclockcalibration_return { -+ struct ntstatus status; -+ struct dxgk_gpuclockdata clock_data; -+}; -+ - struct dxgkvmb_command_createallocation_allocinfo { - u32 flags; - u32 priv_drv_data_size; -@@ -408,6 +418,17 @@ struct dxgkvmb_command_openresource_return { - /* struct d3dkmthandle allocation[allocation_count]; */ - }; - -+struct dxgkvmb_command_querystatistics { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_querystatistics args; -+}; -+ -+struct dxgkvmb_command_querystatistics_return { -+ struct ntstatus status; -+ u32 reserved; -+ struct d3dkmt_querystatistics_result result; -+}; -+ - struct dxgkvmb_command_getstandardallocprivdata { - struct dxgkvmb_command_vgpu_to_host hdr; - enum d3dkmdt_standardallocationtype alloc_type; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 
111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -149,6 +149,65 @@ static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, - return ret; - } - -+static int dxgkio_query_statistics(struct dxgprocess *process, -+ void __user *inargs) -+{ -+ struct d3dkmt_querystatistics *args; -+ int ret; -+ struct dxgadapter *entry; -+ struct dxgadapter *adapter = NULL; -+ struct winluid tmp; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ args = vzalloc(sizeof(struct d3dkmt_querystatistics)); -+ if (args == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ ret = copy_from_user(args, inargs, sizeof(*args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED); -+ list_for_each_entry(entry, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (dxgadapter_acquire_lock_shared(entry) == 0) { -+ if (*(u64 *) &entry->luid == -+ *(u64 *) &args->adapter_luid) { -+ adapter = entry; -+ break; -+ } -+ dxgadapter_release_lock_shared(entry); -+ } -+ } -+ dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED); -+ if (adapter) { -+ tmp = args->adapter_luid; -+ args->adapter_luid = adapter->host_adapter_luid; -+ ret = dxgvmb_send_query_statistics(process, adapter, args); -+ if (ret >= 0) { -+ args->adapter_luid = tmp; -+ ret = copy_to_user(inargs, args, sizeof(*args)); -+ if (ret) { -+ DXG_ERR("failed to copy args"); -+ ret = -EINVAL; -+ } -+ } -+ dxgadapter_release_lock_shared(adapter); -+ } -+ -+cleanup: -+ if (args) -+ vfree(args); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkp_enum_adapters(struct dxgprocess *process, - union d3dkmt_enumadapters_filter filter, -@@ -3536,6 +3595,54 @@ dxgkio_change_vidmem_reservation(struct dxgprocess *process, void *__user inargs - return ret; - } - -+static int -+dxgkio_query_clock_calibration(struct dxgprocess *process, void *__user 
inargs) -+{ -+ struct d3dkmt_queryclockcalibration args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ bool adapter_locked = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ adapter_locked = true; -+ -+ args.adapter = adapter->host_handle; -+ ret = dxgvmb_send_query_clock_calibration(process, adapter, -+ &args, inargs); -+ if (ret < 0) -+ goto cleanup; -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (adapter_locked) -+ dxgadapter_release_lock_shared(adapter); -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ return ret; -+} -+ - static int - dxgkio_flush_heap_transitions(struct dxgprocess *process, void *__user inargs) - { -@@ -4470,14 +4577,14 @@ static struct ioctl_desc ioctls[] = { - /* 0x3b */ {dxgkio_wait_sync_object_gpu, - LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU}, - /* 0x3c */ {dxgkio_get_allocation_priority, LX_DXGETALLOCATIONPRIORITY}, --/* 0x3d */ {}, -+/* 0x3d */ {dxgkio_query_clock_calibration, LX_DXQUERYCLOCKCALIBRATION}, - /* 0x3e */ {dxgkio_enum_adapters3, LX_DXENUMADAPTERS3}, - /* 0x3f */ {dxgkio_share_objects, LX_DXSHAREOBJECTS}, - /* 0x40 */ {dxgkio_open_sync_object_nt, LX_DXOPENSYNCOBJECTFROMNTHANDLE2}, - /* 0x41 */ {dxgkio_query_resource_info_nt, - LX_DXQUERYRESOURCEINFOFROMNTHANDLE}, - /* 0x42 */ {dxgkio_open_resource_nt, LX_DXOPENRESOURCEFROMNTHANDLE}, --/* 0x43 */ {}, -+/* 0x43 */ {dxgkio_query_statistics, LX_DXQUERYSTATISTICS}, - /* 0x44 */ {dxgkio_share_object_with_host, LX_DXSHAREOBJECTWITHHOST}, - /* 0x45 */ {}, - }; -diff --git 
a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -996,6 +996,34 @@ struct d3dkmt_queryadapterinfo { - __u32 private_data_size; - }; - -+#pragma pack(push, 1) -+ -+struct dxgk_gpuclockdata_flags { -+ union { -+ struct { -+ __u32 context_management_processor:1; -+ __u32 reserved:31; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct dxgk_gpuclockdata { -+ __u64 gpu_frequency; -+ __u64 gpu_clock_counter; -+ __u64 cpu_clock_counter; -+ struct dxgk_gpuclockdata_flags flags; -+} __packed; -+ -+struct d3dkmt_queryclockcalibration { -+ struct d3dkmthandle adapter; -+ __u32 node_ordinal; -+ __u32 physical_adapter_index; -+ struct dxgk_gpuclockdata clock_data; -+}; -+ -+#pragma pack(pop) -+ - struct d3dkmt_flushheaptransitions { - struct d3dkmthandle adapter; - }; -@@ -1238,6 +1266,36 @@ struct d3dkmt_enumadapters3 { - #endif - }; - -+enum d3dkmt_querystatistics_type { -+ _D3DKMT_QUERYSTATISTICS_ADAPTER = 0, -+ _D3DKMT_QUERYSTATISTICS_PROCESS = 1, -+ _D3DKMT_QUERYSTATISTICS_PROCESS_ADAPTER = 2, -+ _D3DKMT_QUERYSTATISTICS_SEGMENT = 3, -+ _D3DKMT_QUERYSTATISTICS_PROCESS_SEGMENT = 4, -+ _D3DKMT_QUERYSTATISTICS_NODE = 5, -+ _D3DKMT_QUERYSTATISTICS_PROCESS_NODE = 6, -+ _D3DKMT_QUERYSTATISTICS_VIDPNSOURCE = 7, -+ _D3DKMT_QUERYSTATISTICS_PROCESS_VIDPNSOURCE = 8, -+ _D3DKMT_QUERYSTATISTICS_PROCESS_SEGMENT_GROUP = 9, -+ _D3DKMT_QUERYSTATISTICS_PHYSICAL_ADAPTER = 10, -+}; -+ -+struct d3dkmt_querystatistics_result { -+ char size[0x308]; -+}; -+ -+struct d3dkmt_querystatistics { -+ union { -+ struct { -+ enum d3dkmt_querystatistics_type type; -+ struct winluid adapter_luid; -+ __u64 process; -+ struct d3dkmt_querystatistics_result result; -+ }; -+ char size[0x328]; -+ }; -+}; -+ - struct d3dkmt_shareobjectwithhost { - struct d3dkmthandle device_handle; - struct d3dkmthandle object_handle; -@@ -1328,6 +1386,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 
0x3b, struct d3dkmt_waitforsynchronizationobjectfromgpu) - #define LX_DXGETALLOCATIONPRIORITY \ - _IOWR(0x47, 0x3c, struct d3dkmt_getallocationpriority) -+#define LX_DXQUERYCLOCKCALIBRATION \ -+ _IOWR(0x47, 0x3d, struct d3dkmt_queryclockcalibration) - #define LX_DXENUMADAPTERS3 \ - _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) - #define LX_DXSHAREOBJECTS \ -@@ -1338,6 +1398,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x41, struct d3dkmt_queryresourceinfofromnthandle) - #define LX_DXOPENRESOURCEFROMNTHANDLE \ - _IOWR(0x47, 0x42, struct d3dkmt_openresourcefromnthandle) -+#define LX_DXQUERYSTATISTICS \ -+ _IOWR(0x47, 0x43, struct d3dkmt_querystatistics) - #define LX_DXSHAREOBJECTWITHHOST \ - _IOWR(0x47, 0x44, struct d3dkmt_shareobjectwithhost) - --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1692-drivers-hv-dxgkrnl-Offer-and-reclaim-allocations.patch b/patch/kernel/archive/wsl2-arm64-6.1/1692-drivers-hv-dxgkrnl-Offer-and-reclaim-allocations.patch deleted file mode 100644 index 87535045c65f..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1692-drivers-hv-dxgkrnl-Offer-and-reclaim-allocations.patch +++ /dev/null @@ -1,466 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 18 Jan 2022 15:01:55 -0800 -Subject: drivers: hv: dxgkrnl: Offer and reclaim allocations - -Implement ioctls to offer and reclaim compute device allocations: - - LX_DXOFFERALLOCATIONS, - - LX_DXRECLAIMALLOCATIONS2 - -When a user mode driver (UMD) does not need to access an allocation, -it can "offer" it by issuing the LX_DXOFFERALLOCATIONS ioctl. This -means that the allocation is not in use and its local device memory -could be evicted. The freed space could be given to another allocation. -When the allocation is again needed, the UMD can attempt to"reclaim" -the allocation by issuing the LX_DXRECLAIMALLOCATIONS2 ioctl. 
If the -allocation is still not evicted, the reclaim operation succeeds and no -other action is required. If the reclaim operation fails, the caller -must restore the content of the allocation before it can be used by -the device. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 8 + - drivers/hv/dxgkrnl/dxgvmbus.c | 124 +++++++++- - drivers/hv/dxgkrnl/dxgvmbus.h | 27 ++ - drivers/hv/dxgkrnl/ioctl.c | 117 ++++++++- - include/uapi/misc/d3dkmthk.h | 67 +++++ - 5 files changed, 340 insertions(+), 3 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -865,6 +865,14 @@ int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, - int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_getallocationpriority *a); -+int dxgvmb_send_offer_allocations(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_offerallocations *args); -+int dxgvmb_send_reclaim_allocations(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle device, -+ struct d3dkmt_reclaimallocations2 *args, -+ u64 __user *paging_fence_value); - int dxgvmb_send_change_vidmem_reservation(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmthandle other_process, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1858,7 +1858,7 @@ int dxgvmb_send_query_clock_calibration(struct dxgprocess *process, - ret = copy_to_user(&inargs->clock_data, &result.clock_data, - sizeof(result.clock_data)); - if (ret) { -- pr_err("%s failed to copy clock data", __func__); -+ DXG_ERR("failed to copy clock data"); - ret = -EINVAL; - 
goto cleanup; - } -@@ -2949,6 +2949,128 @@ int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_offer_allocations(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_offerallocations *args) -+{ -+ struct dxgkvmb_command_offerallocations *command; -+ int ret = -EINVAL; -+ u32 alloc_size = sizeof(struct d3dkmthandle) * args->allocation_count; -+ u32 cmd_size = sizeof(struct dxgkvmb_command_offerallocations) + -+ alloc_size - sizeof(struct d3dkmthandle); -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_OFFERALLOCATIONS, -+ process->host_handle); -+ command->flags = args->flags; -+ command->priority = args->priority; -+ command->device = args->device; -+ command->allocation_count = args->allocation_count; -+ if (args->resources) { -+ command->resources = true; -+ ret = copy_from_user(command->allocations, args->resources, -+ alloc_size); -+ } else { -+ ret = copy_from_user(command->allocations, -+ args->allocations, alloc_size); -+ } -+ if (ret) { -+ DXG_ERR("failed to copy input handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ pr_debug("err: %s %d", __func__, ret); -+ return ret; -+} -+ -+int dxgvmb_send_reclaim_allocations(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle device, -+ struct d3dkmt_reclaimallocations2 *args, -+ u64 __user *paging_fence_value) -+{ -+ struct dxgkvmb_command_reclaimallocations *command; -+ struct dxgkvmb_command_reclaimallocations_return *result; -+ int ret; -+ u32 alloc_size = sizeof(struct d3dkmthandle) * args->allocation_count; -+ u32 cmd_size = sizeof(struct dxgkvmb_command_reclaimallocations) + -+ alloc_size - 
sizeof(struct d3dkmthandle); -+ u32 result_size = sizeof(*result); -+ struct dxgvmbusmsgres msg = {.hdr = NULL}; -+ -+ if (args->results) -+ result_size += (args->allocation_count - 1) * -+ sizeof(enum d3dddi_reclaim_result); -+ -+ ret = init_message_res(&msg, adapter, process, cmd_size, result_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ result = msg.res; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_RECLAIMALLOCATIONS, -+ process->host_handle); -+ command->device = device; -+ command->paging_queue = args->paging_queue; -+ command->allocation_count = args->allocation_count; -+ command->write_results = args->results != NULL; -+ if (args->resources) { -+ command->resources = true; -+ ret = copy_from_user(command->allocations, args->resources, -+ alloc_size); -+ } else { -+ ret = copy_from_user(command->allocations, -+ args->allocations, alloc_size); -+ } -+ if (ret) { -+ DXG_ERR("failed to copy input handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ result, msg.res_size); -+ if (ret < 0) -+ goto cleanup; -+ ret = copy_to_user(paging_fence_value, -+ &result->paging_fence_value, sizeof(u64)); -+ if (ret) { -+ DXG_ERR("failed to copy paging fence"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = ntstatus2int(result->status); -+ if (NT_SUCCESS(result->status) && args->results) { -+ ret = copy_to_user(args->results, result->discarded, -+ sizeof(result->discarded[0]) * -+ args->allocation_count); -+ if (ret) { -+ DXG_ERR("failed to copy results"); -+ ret = -EINVAL; -+ } -+ } -+ -+cleanup: -+ free_message((struct dxgvmbusmsg *)&msg, process); -+ if (ret) -+ pr_debug("err: %s %d", __func__, ret); -+ return ret; -+} -+ - int dxgvmb_send_change_vidmem_reservation(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmthandle other_process, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 
111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -653,6 +653,33 @@ struct dxgkvmb_command_markdeviceaserror { - struct d3dkmt_markdeviceaserror args; - }; - -+/* Returns ntstatus */ -+struct dxgkvmb_command_offerallocations { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ u32 allocation_count; -+ enum d3dkmt_offer_priority priority; -+ struct d3dkmt_offer_flags flags; -+ bool resources; -+ struct d3dkmthandle allocations[1]; -+}; -+ -+struct dxgkvmb_command_reclaimallocations { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle paging_queue; -+ u32 allocation_count; -+ bool resources; -+ bool write_results; -+ struct d3dkmthandle allocations[1]; -+}; -+ -+struct dxgkvmb_command_reclaimallocations_return { -+ u64 paging_fence_value; -+ struct ntstatus status; -+ enum d3dddi_reclaim_result discarded[1]; -+}; -+ - /* Returns ntstatus */ - struct dxgkvmb_command_changevideomemoryreservation { - struct dxgkvmb_command_vgpu_to_host hdr; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -1961,6 +1961,119 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_offer_allocations(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_offerallocations args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.allocation_count > D3DKMT_MAKERESIDENT_ALLOC_MAX || -+ args.allocation_count == 0) { -+ DXG_ERR("invalid number of allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if ((args.resources == NULL) == (args.allocations == 
NULL)) { -+ DXG_ERR("invalid pointer to resources/allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_offer_allocations(process, adapter, &args); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_reclaim_allocations(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_reclaimallocations2 args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dkmt_reclaimallocations2 * __user in_args = inargs; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.allocation_count > D3DKMT_MAKERESIDENT_ALLOC_MAX || -+ args.allocation_count == 0) { -+ DXG_ERR("invalid number of allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if ((args.resources == NULL) == (args.allocations == NULL)) { -+ DXG_ERR("invalid pointer to resources/allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.paging_queue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_reclaim_allocations(process, adapter, -+ device->handle, &args, -+ &in_args->paging_fence_value); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ 
kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_submit_command(struct dxgprocess *process, void *__user inargs) - { -@@ -4548,12 +4661,12 @@ static struct ioctl_desc ioctls[] = { - /* 0x24 */ {}, - /* 0x25 */ {dxgkio_lock2, LX_DXLOCK2}, - /* 0x26 */ {dxgkio_mark_device_as_error, LX_DXMARKDEVICEASERROR}, --/* 0x27 */ {}, -+/* 0x27 */ {dxgkio_offer_allocations, LX_DXOFFERALLOCATIONS}, - /* 0x28 */ {}, - /* 0x29 */ {}, - /* 0x2a */ {dxgkio_query_alloc_residency, LX_DXQUERYALLOCATIONRESIDENCY}, - /* 0x2b */ {}, --/* 0x2c */ {}, -+/* 0x2c */ {dxgkio_reclaim_allocations, LX_DXRECLAIMALLOCATIONS2}, - /* 0x2d */ {}, - /* 0x2e */ {dxgkio_set_allocation_priority, LX_DXSETALLOCATIONPRIORITY}, - /* 0x2f */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -61,6 +61,7 @@ struct winluid { - #define D3DDDI_MAX_WRITTEN_PRIMARIES 16 - - #define D3DKMT_CREATEALLOCATION_MAX 1024 -+#define D3DKMT_MAKERESIDENT_ALLOC_MAX (1024 * 10) - #define D3DKMT_ADAPTERS_MAX 64 - #define D3DDDI_MAX_BROADCAST_CONTEXT 64 - #define D3DDDI_MAX_OBJECT_WAITED_ON 32 -@@ -1087,6 +1088,68 @@ struct d3dddi_updateallocproperty { - }; - }; - -+enum d3dkmt_offer_priority { -+ _D3DKMT_OFFER_PRIORITY_LOW = 1, -+ _D3DKMT_OFFER_PRIORITY_NORMAL = 2, -+ _D3DKMT_OFFER_PRIORITY_HIGH = 3, -+ _D3DKMT_OFFER_PRIORITY_AUTO = 4, -+}; -+ -+struct d3dkmt_offer_flags { -+ union { -+ struct { -+ __u32 offer_immediately:1; -+ __u32 allow_decommit:1; -+ __u32 reserved:30; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_offerallocations { -+ struct d3dkmthandle device; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *resources; -+ const struct d3dkmthandle *allocations; -+#else -+ __u64 resources; -+ __u64 allocations; -+#endif -+ __u32 allocation_count; -+ enum 
d3dkmt_offer_priority priority; -+ struct d3dkmt_offer_flags flags; -+ __u32 reserved1; -+}; -+ -+enum d3dddi_reclaim_result { -+ _D3DDDI_RECLAIM_RESULT_OK = 0, -+ _D3DDDI_RECLAIM_RESULT_DISCARDED = 1, -+ _D3DDDI_RECLAIM_RESULT_NOT_COMMITTED = 2, -+}; -+ -+struct d3dkmt_reclaimallocations2 { -+ struct d3dkmthandle paging_queue; -+ __u32 allocation_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *resources; -+ struct d3dkmthandle *allocations; -+#else -+ __u64 resources; -+ __u64 allocations; -+#endif -+ union { -+#ifdef __KERNEL__ -+ __u32 *discarded; -+ enum d3dddi_reclaim_result *results; -+#else -+ __u64 discarded; -+ __u64 results; -+#endif -+ }; -+ __u64 paging_fence_value; -+}; -+ - struct d3dkmt_changevideomemoryreservation { - __u64 process; - struct d3dkmthandle adapter; -@@ -1360,8 +1423,12 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x25, struct d3dkmt_lock2) - #define LX_DXMARKDEVICEASERROR \ - _IOWR(0x47, 0x26, struct d3dkmt_markdeviceaserror) -+#define LX_DXOFFERALLOCATIONS \ -+ _IOWR(0x47, 0x27, struct d3dkmt_offerallocations) - #define LX_DXQUERYALLOCATIONRESIDENCY \ - _IOWR(0x47, 0x2a, struct d3dkmt_queryallocationresidency) -+#define LX_DXRECLAIMALLOCATIONS2 \ -+ _IOWR(0x47, 0x2c, struct d3dkmt_reclaimallocations2) - #define LX_DXSETALLOCATIONPRIORITY \ - _IOWR(0x47, 0x2e, struct d3dkmt_setallocationpriority) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1693-drivers-hv-dxgkrnl-Ioctls-to-manage-scheduling-priority.patch b/patch/kernel/archive/wsl2-arm64-6.1/1693-drivers-hv-dxgkrnl-Ioctls-to-manage-scheduling-priority.patch deleted file mode 100644 index 4ff04c894bd6..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1693-drivers-hv-dxgkrnl-Ioctls-to-manage-scheduling-priority.patch +++ /dev/null @@ -1,427 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Fri, 14 Jan 2022 17:57:41 -0800 -Subject: drivers: 
hv: dxgkrnl: Ioctls to manage scheduling priority - -Implement iocts to manage compute device scheduling priority: - - LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY - - LX_DXGETCONTEXTSCHEDULINGPRIORITY - - LX_DXSETCONTEXTINPROCESSSCHEDULINGPRIORITY - - LX_DXSETCONTEXTSCHEDULINGPRIORITY - -Each compute device execution context has an assigned scheduling -priority. It is used by the compute device scheduler on the host to -pick contexts for execution. There is a global priority and a -priority within a process. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 9 + - drivers/hv/dxgkrnl/dxgvmbus.c | 67 +++- - drivers/hv/dxgkrnl/dxgvmbus.h | 19 + - drivers/hv/dxgkrnl/ioctl.c | 177 +++++++++- - include/uapi/misc/d3dkmthk.h | 28 ++ - 5 files changed, 294 insertions(+), 6 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -865,6 +865,15 @@ int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, - int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_getallocationpriority *a); -+int dxgvmb_send_set_context_sch_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle context, -+ int priority, bool in_process); -+int dxgvmb_send_get_context_sch_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle context, -+ int *priority, -+ bool in_process); - int dxgvmb_send_offer_allocations(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_offerallocations *args); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -2949,6 +2949,69 @@ int 
dxgvmb_send_get_allocation_priority(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_set_context_sch_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle context, -+ int priority, -+ bool in_process) -+{ -+ struct dxgkvmb_command_setcontextschedulingpriority2 *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_SETCONTEXTSCHEDULINGPRIORITY, -+ process->host_handle); -+ command->context = context; -+ command->priority = priority; -+ command->in_process = in_process; -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_get_context_sch_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle context, -+ int *priority, -+ bool in_process) -+{ -+ struct dxgkvmb_command_getcontextschedulingpriority *command; -+ struct dxgkvmb_command_getcontextschedulingpriority_return result = { }; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_GETCONTEXTSCHEDULINGPRIORITY, -+ process->host_handle); -+ command->context = context; -+ command->in_process = in_process; -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret >= 0) { -+ ret = ntstatus2int(result.status); -+ *priority = result.priority; -+ } -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_offer_allocations(struct dxgprocess *process, - struct dxgadapter *adapter, - struct 
d3dkmt_offerallocations *args) -@@ -2991,7 +3054,7 @@ int dxgvmb_send_offer_allocations(struct dxgprocess *process, - cleanup: - free_message(&msg, process); - if (ret) -- pr_debug("err: %s %d", __func__, ret); -+ DXG_TRACE("err: %d", ret); - return ret; - } - -@@ -3067,7 +3130,7 @@ int dxgvmb_send_reclaim_allocations(struct dxgprocess *process, - cleanup: - free_message((struct dxgvmbusmsg *)&msg, process); - if (ret) -- pr_debug("err: %s %d", __func__, ret); -+ DXG_TRACE("err: %d", ret); - return ret; - } - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -331,6 +331,25 @@ struct dxgkvmb_command_getallocationpriority_return { - /* u32 priorities[allocation_count or 1]; */ - }; - -+/* Returns ntstatus */ -+struct dxgkvmb_command_setcontextschedulingpriority2 { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle context; -+ int priority; -+ bool in_process; -+}; -+ -+struct dxgkvmb_command_getcontextschedulingpriority { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle context; -+ bool in_process; -+}; -+ -+struct dxgkvmb_command_getcontextschedulingpriority_return { -+ struct ntstatus status; -+ int priority; -+}; -+ - struct dxgkvmb_command_createdevice { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_createdeviceflags flags; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3660,6 +3660,171 @@ dxgkio_get_allocation_priority(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+set_context_scheduling_priority(struct dxgprocess *process, -+ struct d3dkmthandle hcontext, -+ int priority, bool in_process) -+{ -+ int ret = 0; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ device = 
dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ hcontext); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ ret = dxgvmb_send_set_context_sch_priority(process, adapter, -+ hcontext, priority, -+ in_process); -+ if (ret < 0) -+ DXG_ERR("send_set_context_scheduling_priority failed"); -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ return ret; -+} -+ -+static int -+dxgkio_set_context_scheduling_priority(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_setcontextschedulingpriority args; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = set_context_scheduling_priority(process, args.context, -+ args.priority, false); -+cleanup: -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+get_context_scheduling_priority(struct dxgprocess *process, -+ struct d3dkmthandle hcontext, -+ int __user *priority, -+ bool in_process) -+{ -+ int ret; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int pri = 0; -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ hcontext); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ ret = dxgvmb_send_get_context_sch_priority(process, adapter, -+ hcontext, &pri, in_process); -+ if (ret < 0) -+ goto cleanup; -+ ret = copy_to_user(priority, &pri, sizeof(pri)); -+ if (ret) { -+ DXG_ERR("failed to copy priority to user"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ if (adapter) -+ 
dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ return ret; -+} -+ -+static int -+dxgkio_get_context_scheduling_priority(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_getcontextschedulingpriority args; -+ struct d3dkmt_getcontextschedulingpriority __user *input = inargs; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = get_context_scheduling_priority(process, args.context, -+ &input->priority, false); -+cleanup: -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_set_context_process_scheduling_priority(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_setcontextinprocessschedulingpriority args; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = set_context_scheduling_priority(process, args.context, -+ args.priority, true); -+cleanup: -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_get_context_process_scheduling_priority(struct dxgprocess *process, -+ void __user *inargs) -+{ -+ struct d3dkmt_getcontextinprocessschedulingpriority args; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = get_context_scheduling_priority(process, args.context, -+ &((struct d3dkmt_getcontextinprocessschedulingpriority *) -+ inargs)->priority, true); -+cleanup: -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_change_vidmem_reservation(struct dxgprocess *process, void *__user inargs) - { -@@ -4655,8 +4820,10 @@ static struct ioctl_desc ioctls[] = { - /* 0x1e */ {}, - /* 0x1f 
*/ {dxgkio_flush_heap_transitions, LX_DXFLUSHHEAPTRANSITIONS}, - /* 0x20 */ {}, --/* 0x21 */ {}, --/* 0x22 */ {}, -+/* 0x21 */ {dxgkio_get_context_process_scheduling_priority, -+ LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY}, -+/* 0x22 */ {dxgkio_get_context_scheduling_priority, -+ LX_DXGETCONTEXTSCHEDULINGPRIORITY}, - /* 0x23 */ {}, - /* 0x24 */ {}, - /* 0x25 */ {dxgkio_lock2, LX_DXLOCK2}, -@@ -4669,8 +4836,10 @@ static struct ioctl_desc ioctls[] = { - /* 0x2c */ {dxgkio_reclaim_allocations, LX_DXRECLAIMALLOCATIONS2}, - /* 0x2d */ {}, - /* 0x2e */ {dxgkio_set_allocation_priority, LX_DXSETALLOCATIONPRIORITY}, --/* 0x2f */ {}, --/* 0x30 */ {}, -+/* 0x2f */ {dxgkio_set_context_process_scheduling_priority, -+ LX_DXSETCONTEXTINPROCESSSCHEDULINGPRIORITY}, -+/* 0x30 */ {dxgkio_set_context_scheduling_priority, -+ LX_DXSETCONTEXTSCHEDULINGPRIORITY}, - /* 0x31 */ {dxgkio_signal_sync_object_cpu, - LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU}, - /* 0x32 */ {dxgkio_signal_sync_object_gpu, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -708,6 +708,26 @@ struct d3dkmt_submitcommandtohwqueue { - #endif - }; - -+struct d3dkmt_setcontextschedulingpriority { -+ struct d3dkmthandle context; -+ int priority; -+}; -+ -+struct d3dkmt_setcontextinprocessschedulingpriority { -+ struct d3dkmthandle context; -+ int priority; -+}; -+ -+struct d3dkmt_getcontextschedulingpriority { -+ struct d3dkmthandle context; -+ int priority; -+}; -+ -+struct d3dkmt_getcontextinprocessschedulingpriority { -+ struct d3dkmthandle context; -+ int priority; -+}; -+ - struct d3dkmt_setallocationpriority { - struct d3dkmthandle device; - struct d3dkmthandle resource; -@@ -1419,6 +1439,10 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) - #define LX_DXFLUSHHEAPTRANSITIONS \ - _IOWR(0x47, 0x1f, struct 
d3dkmt_flushheaptransitions) -+#define LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY \ -+ _IOWR(0x47, 0x21, struct d3dkmt_getcontextinprocessschedulingpriority) -+#define LX_DXGETCONTEXTSCHEDULINGPRIORITY \ -+ _IOWR(0x47, 0x22, struct d3dkmt_getcontextschedulingpriority) - #define LX_DXLOCK2 \ - _IOWR(0x47, 0x25, struct d3dkmt_lock2) - #define LX_DXMARKDEVICEASERROR \ -@@ -1431,6 +1455,10 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x2c, struct d3dkmt_reclaimallocations2) - #define LX_DXSETALLOCATIONPRIORITY \ - _IOWR(0x47, 0x2e, struct d3dkmt_setallocationpriority) -+#define LX_DXSETCONTEXTINPROCESSSCHEDULINGPRIORITY \ -+ _IOWR(0x47, 0x2f, struct d3dkmt_setcontextinprocessschedulingpriority) -+#define LX_DXSETCONTEXTSCHEDULINGPRIORITY \ -+ _IOWR(0x47, 0x30, struct d3dkmt_setcontextschedulingpriority) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x31, struct d3dkmt_signalsynchronizationobjectfromcpu) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1694-drivers-hv-dxgkrnl-Manage-residency-of-allocations.patch b/patch/kernel/archive/wsl2-arm64-6.1/1694-drivers-hv-dxgkrnl-Manage-residency-of-allocations.patch deleted file mode 100644 index f991dfd2bdca..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1694-drivers-hv-dxgkrnl-Manage-residency-of-allocations.patch +++ /dev/null @@ -1,447 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Fri, 14 Jan 2022 17:33:52 -0800 -Subject: drivers: hv: dxgkrnl: Manage residency of allocations - -Implement ioctls to manage residency of compute device allocations: - - LX_DXMAKERESIDENT, - - LX_DXEVICT. - -An allocation is "resident" when the compute devoce is setup to -access it. It means that the allocation is in the local device -memory or in non-pageable system memory. - -The current design does not support on demand compute device page -faulting. 
An allocation must be resident before the compute device -is allowed to access it. - -The LX_DXMAKERESIDENT ioctl instructs the video memory manager to -make the given allocations resident. The operation is submitted to -a paging queue (dxgpagingqueue). When the ioctl returns a "pending" -status, a monitored fence sync object can be used to synchronize -with the completion of the operation. - -The LX_DXEVICT ioctl istructs the video memory manager to evict -the given allocations from device accessible memory. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 4 + - drivers/hv/dxgkrnl/dxgvmbus.c | 98 +++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 27 ++ - drivers/hv/dxgkrnl/ioctl.c | 141 +++++++++- - include/uapi/misc/d3dkmthk.h | 54 ++++ - 5 files changed, 322 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -810,6 +810,10 @@ int dxgvmb_send_create_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - int dxgvmb_send_destroy_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - struct d3dkmt_destroyallocation2 *args, - struct d3dkmthandle *alloc_handles); -+int dxgvmb_send_make_resident(struct dxgprocess *pr, struct dxgadapter *adapter, -+ struct d3dddi_makeresident *args); -+int dxgvmb_send_evict(struct dxgprocess *pr, struct dxgadapter *adapter, -+ struct d3dkmt_evict *args); - int dxgvmb_send_submit_command(struct dxgprocess *pr, - struct dxgadapter *adapter, - struct d3dkmt_submitcommand *args); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -2279,6 +2279,104 @@ int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - return ret; - } - -+int 
dxgvmb_send_make_resident(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dddi_makeresident *args) -+{ -+ int ret; -+ u32 cmd_size; -+ struct dxgkvmb_command_makeresident_return result = { }; -+ struct dxgkvmb_command_makeresident *command = NULL; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ cmd_size = (args->alloc_count - 1) * sizeof(struct d3dkmthandle) + -+ sizeof(struct dxgkvmb_command_makeresident); -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ ret = copy_from_user(command->allocations, args->allocation_list, -+ args->alloc_count * -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_MAKERESIDENT, -+ process->host_handle); -+ command->alloc_count = args->alloc_count; -+ command->paging_queue = args->paging_queue; -+ command->flags = args->flags; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) { -+ DXG_ERR("send_make_resident failed %x", ret); -+ goto cleanup; -+ } -+ -+ args->paging_fence_value = result.paging_fence_value; -+ args->num_bytes_to_trim = result.num_bytes_to_trim; -+ ret = ntstatus2int(result.status); -+ -+cleanup: -+ -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_evict(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_evict *args) -+{ -+ int ret; -+ u32 cmd_size; -+ struct dxgkvmb_command_evict_return result = { }; -+ struct dxgkvmb_command_evict *command = NULL; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ cmd_size = (args->alloc_count - 1) * sizeof(struct d3dkmthandle) + -+ sizeof(struct dxgkvmb_command_evict); -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ ret = 
copy_from_user(command->allocations, args->allocations, -+ args->alloc_count * -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_EVICT, process->host_handle); -+ command->alloc_count = args->alloc_count; -+ command->device = args->device; -+ command->flags = args->flags; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) { -+ DXG_ERR("send_evict failed %x", ret); -+ goto cleanup; -+ } -+ args->num_bytes_to_trim = result.num_bytes_to_trim; -+ -+cleanup: -+ -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_submit_command(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_submitcommand *args) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -372,6 +372,33 @@ struct dxgkvmb_command_flushdevice { - enum dxgdevice_flushschedulerreason reason; - }; - -+struct dxgkvmb_command_makeresident { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle paging_queue; -+ struct d3dddi_makeresident_flags flags; -+ u32 alloc_count; -+ struct d3dkmthandle allocations[1]; -+}; -+ -+struct dxgkvmb_command_makeresident_return { -+ u64 paging_fence_value; -+ u64 num_bytes_to_trim; -+ struct ntstatus status; -+}; -+ -+struct dxgkvmb_command_evict { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dddi_evict_flags flags; -+ u32 alloc_count; -+ struct d3dkmthandle allocations[1]; -+}; -+ -+struct dxgkvmb_command_evict_return { -+ u64 num_bytes_to_trim; -+}; -+ - struct dxgkvmb_command_submitcommand { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_submitcommand args; -diff 
--git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -1961,6 +1961,143 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_make_resident(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret, ret2; -+ struct d3dddi_makeresident args; -+ struct d3dddi_makeresident *input = inargs; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.alloc_count > D3DKMT_MAKERESIDENT_ALLOC_MAX || -+ args.alloc_count == 0) { -+ DXG_ERR("invalid number of allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (args.paging_queue.v == 0) { -+ DXG_ERR("paging queue is missing"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.paging_queue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_make_resident(process, adapter, &args); -+ if (ret < 0) -+ goto cleanup; -+ /* STATUS_PENING is a success code > 0. 
It is returned to user mode */ -+ if (!(ret == STATUS_PENDING || ret == 0)) { -+ DXG_ERR("Unexpected error %x", ret); -+ goto cleanup; -+ } -+ -+ ret2 = copy_to_user(&input->paging_fence_value, -+ &args.paging_fence_value, sizeof(u64)); -+ if (ret2) { -+ DXG_ERR("failed to copy paging fence"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret2 = copy_to_user(&input->num_bytes_to_trim, -+ &args.num_bytes_to_trim, sizeof(u64)); -+ if (ret2) { -+ DXG_ERR("failed to copy bytes to trim"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ -+ return ret; -+} -+ -+static int -+dxgkio_evict(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_evict args; -+ struct d3dkmt_evict *input = inargs; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.alloc_count > D3DKMT_MAKERESIDENT_ALLOC_MAX || -+ args.alloc_count == 0) { -+ DXG_ERR("invalid number of allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_evict(process, adapter, &args); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(&input->num_bytes_to_trim, -+ &args.num_bytes_to_trim, sizeof(u64)); -+ if (ret) { -+ DXG_ERR("failed to copy bytes to trim to user"); -+ ret = -EINVAL; -+ } -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, 
dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_offer_allocations(struct dxgprocess *process, void *__user inargs) - { -@@ -4797,7 +4934,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x08 */ {}, - /* 0x09 */ {dxgkio_query_adapter_info, LX_DXQUERYADAPTERINFO}, - /* 0x0a */ {dxgkio_query_vidmem_info, LX_DXQUERYVIDEOMEMORYINFO}, --/* 0x0b */ {}, -+/* 0x0b */ {dxgkio_make_resident, LX_DXMAKERESIDENT}, - /* 0x0c */ {}, - /* 0x0d */ {dxgkio_escape, LX_DXESCAPE}, - /* 0x0e */ {dxgkio_get_device_state, LX_DXGETDEVICESTATE}, -@@ -4817,7 +4954,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x1b */ {dxgkio_destroy_hwqueue, LX_DXDESTROYHWQUEUE}, - /* 0x1c */ {dxgkio_destroy_paging_queue, LX_DXDESTROYPAGINGQUEUE}, - /* 0x1d */ {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT}, --/* 0x1e */ {}, -+/* 0x1e */ {dxgkio_evict, LX_DXEVICT}, - /* 0x1f */ {dxgkio_flush_heap_transitions, LX_DXFLUSHHEAPTRANSITIONS}, - /* 0x20 */ {}, - /* 0x21 */ {dxgkio_get_context_process_scheduling_priority, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -962,6 +962,56 @@ struct d3dkmt_destroyallocation2 { - struct d3dddicb_destroyallocation2flags flags; - }; - -+struct d3dddi_makeresident_flags { -+ union { -+ struct { -+ __u32 cant_trim_further:1; -+ __u32 must_succeed:1; -+ __u32 reserved:30; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dddi_makeresident { -+ struct d3dkmthandle paging_queue; -+ __u32 alloc_count; -+#ifdef __KERNEL__ -+ const struct d3dkmthandle *allocation_list; -+ const __u32 *priority_list; -+#else -+ __u64 allocation_list; -+ __u64 priority_list; -+#endif -+ struct d3dddi_makeresident_flags flags; -+ __u64 paging_fence_value; -+ __u64 num_bytes_to_trim; -+}; -+ -+struct d3dddi_evict_flags { -+ union { -+ struct { -+ __u32 evict_only_if_necessary:1; -+ 
__u32 not_written_to:1; -+ __u32 reserved:30; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_evict { -+ struct d3dkmthandle device; -+ __u32 alloc_count; -+#ifdef __KERNEL__ -+ const struct d3dkmthandle *allocations; -+#else -+ __u64 allocations; -+#endif -+ struct d3dddi_evict_flags flags; -+ __u32 reserved; -+ __u64 num_bytes_to_trim; -+}; -+ - enum d3dkmt_memory_segment_group { - _D3DKMT_MEMORY_SEGMENT_GROUP_LOCAL = 0, - _D3DKMT_MEMORY_SEGMENT_GROUP_NON_LOCAL = 1 -@@ -1407,6 +1457,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) - #define LX_DXQUERYVIDEOMEMORYINFO \ - _IOWR(0x47, 0x0a, struct d3dkmt_queryvideomemoryinfo) -+#define LX_DXMAKERESIDENT \ -+ _IOWR(0x47, 0x0b, struct d3dddi_makeresident) - #define LX_DXESCAPE \ - _IOWR(0x47, 0x0d, struct d3dkmt_escape) - #define LX_DXGETDEVICESTATE \ -@@ -1437,6 +1489,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ - _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) -+#define LX_DXEVICT \ -+ _IOWR(0x47, 0x1e, struct d3dkmt_evict) - #define LX_DXFLUSHHEAPTRANSITIONS \ - _IOWR(0x47, 0x1f, struct d3dkmt_flushheaptransitions) - #define LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1695-drivers-hv-dxgkrnl-Manage-compute-device-virtual-addresses.patch b/patch/kernel/archive/wsl2-arm64-6.1/1695-drivers-hv-dxgkrnl-Manage-compute-device-virtual-addresses.patch deleted file mode 100644 index 66ab6b6a7527..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1695-drivers-hv-dxgkrnl-Manage-compute-device-virtual-addresses.patch +++ /dev/null @@ -1,703 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Fri, 14 Jan 2022 17:13:04 -0800 -Subject: drivers: hv: dxgkrnl: Manage compute device virtual addresses - -Implement ioctls to manage compute device virtual 
addresses (VA): - - LX_DXRESERVEGPUVIRTUALADDRESS, - - LX_DXFREEGPUVIRTUALADDRESS, - - LX_DXMAPGPUVIRTUALADDRESS, - - LX_DXUPDATEGPUVIRTUALADDRESS. - -Compute devices access memory by using virtual addressses. -Each process has a dedicated VA space. The video memory manager -on the host is responsible with updating device page tables -before submitting a DMA buffer for execution. - -The LX_DXRESERVEGPUVIRTUALADDRESS ioctl reserves a portion of the -process compute device VA space. - -The LX_DXMAPGPUVIRTUALADDRESS ioctl reserves a portion of the process -compute device VA space and maps it to the given compute device -allocation. - -The LX_DXFREEGPUVIRTUALADDRESS frees the previously reserved portion -of the compute device VA space. - -The LX_DXUPDATEGPUVIRTUALADDRESS ioctl adds operations to modify the -compute device VA space to a compute device execution context. It -allows the operations to be queued and synchronized with execution -of other compute device DMA buffers.. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 10 + - drivers/hv/dxgkrnl/dxgvmbus.c | 150 ++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 38 ++ - drivers/hv/dxgkrnl/ioctl.c | 228 +++++++++- - include/uapi/misc/d3dkmthk.h | 126 +++++ - 5 files changed, 548 insertions(+), 4 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -817,6 +817,16 @@ int dxgvmb_send_evict(struct dxgprocess *pr, struct dxgadapter *adapter, - int dxgvmb_send_submit_command(struct dxgprocess *pr, - struct dxgadapter *adapter, - struct d3dkmt_submitcommand *args); -+int dxgvmb_send_map_gpu_va(struct dxgprocess *pr, struct d3dkmthandle h, -+ struct dxgadapter *adapter, -+ struct d3dddi_mapgpuvirtualaddress *args); -+int dxgvmb_send_reserve_gpu_va(struct dxgprocess *pr, -+ struct dxgadapter *adapter, -+ 
struct d3dddi_reservegpuvirtualaddress *args); -+int dxgvmb_send_free_gpu_va(struct dxgprocess *pr, struct dxgadapter *adapter, -+ struct d3dkmt_freegpuvirtualaddress *args); -+int dxgvmb_send_update_gpu_va(struct dxgprocess *pr, struct dxgadapter *adapter, -+ struct d3dkmt_updategpuvirtualaddress *args); - int dxgvmb_send_create_sync_object(struct dxgprocess *pr, - struct dxgadapter *adapter, - struct d3dkmt_createsynchronizationobject2 -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -2432,6 +2432,156 @@ int dxgvmb_send_submit_command(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_map_gpu_va(struct dxgprocess *process, -+ struct d3dkmthandle device, -+ struct dxgadapter *adapter, -+ struct d3dddi_mapgpuvirtualaddress *args) -+{ -+ struct dxgkvmb_command_mapgpuvirtualaddress *command; -+ struct dxgkvmb_command_mapgpuvirtualaddress_return result; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_MAPGPUVIRTUALADDRESS, -+ process->host_handle); -+ command->args = *args; -+ command->device = device; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result, -+ sizeof(result)); -+ if (ret < 0) -+ goto cleanup; -+ args->virtual_address = result.virtual_address; -+ args->paging_fence_value = result.paging_fence_value; -+ ret = ntstatus2int(result.status); -+ -+cleanup: -+ -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_reserve_gpu_va(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dddi_reservegpuvirtualaddress *args) -+{ -+ struct dxgkvmb_command_reservegpuvirtualaddress *command; -+ struct 
dxgkvmb_command_reservegpuvirtualaddress_return result; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_RESERVEGPUVIRTUALADDRESS, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result, -+ sizeof(result)); -+ args->virtual_address = result.virtual_address; -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_free_gpu_va(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_freegpuvirtualaddress *args) -+{ -+ struct dxgkvmb_command_freegpuvirtualaddress *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_FREEGPUVIRTUALADDRESS, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_update_gpu_va(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_updategpuvirtualaddress *args) -+{ -+ struct dxgkvmb_command_updategpuvirtualaddress *command; -+ u32 cmd_size; -+ u32 op_size; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ if (args->num_operations == 0 || -+ (DXG_MAX_VM_BUS_PACKET_SIZE / -+ sizeof(struct d3dddi_updategpuvirtualaddress_operation)) < -+ args->num_operations) { -+ ret = -EINVAL; -+ DXG_ERR("Invalid number of operations: %d", -+ args->num_operations); -+ goto cleanup; -+ } -+ -+ op_size = args->num_operations * -+ sizeof(struct 
d3dddi_updategpuvirtualaddress_operation); -+ cmd_size = sizeof(struct dxgkvmb_command_updategpuvirtualaddress) + -+ op_size - sizeof(args->operations[0]); -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_UPDATEGPUVIRTUALADDRESS, -+ process->host_handle); -+ command->fence_value = args->fence_value; -+ command->device = args->device; -+ command->context = args->context; -+ command->fence_object = args->fence_object; -+ command->num_operations = args->num_operations; -+ command->flags = args->flags.value; -+ ret = copy_from_user(command->operations, args->operations, -+ op_size); -+ if (ret) { -+ DXG_ERR("failed to copy operations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - static void set_result(struct d3dkmt_createsynchronizationobject2 *args, - u64 fence_gpu_va, u8 *va) - { -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -418,6 +418,44 @@ struct dxgkvmb_command_flushheaptransitions { - struct dxgkvmb_command_vgpu_to_host hdr; - }; - -+struct dxgkvmb_command_freegpuvirtualaddress { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_freegpuvirtualaddress args; -+}; -+ -+struct dxgkvmb_command_mapgpuvirtualaddress { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dddi_mapgpuvirtualaddress args; -+ struct d3dkmthandle device; -+}; -+ -+struct dxgkvmb_command_mapgpuvirtualaddress_return { -+ u64 virtual_address; -+ u64 paging_fence_value; -+ struct ntstatus status; -+}; -+ -+struct dxgkvmb_command_reservegpuvirtualaddress { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct 
d3dddi_reservegpuvirtualaddress args; -+}; -+ -+struct dxgkvmb_command_reservegpuvirtualaddress_return { -+ u64 virtual_address; -+ u64 paging_fence_value; -+}; -+ -+struct dxgkvmb_command_updategpuvirtualaddress { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ u64 fence_value; -+ struct d3dkmthandle device; -+ struct d3dkmthandle context; -+ struct d3dkmthandle fence_object; -+ u32 num_operations; -+ u32 flags; -+ struct d3dddi_updategpuvirtualaddress_operation operations[1]; -+}; -+ - struct dxgkvmb_command_queryclockcalibration { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_queryclockcalibration args; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -2492,6 +2492,226 @@ dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_map_gpu_va(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret, ret2; -+ struct d3dddi_mapgpuvirtualaddress args; -+ struct d3dddi_mapgpuvirtualaddress *input = inargs; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.paging_queue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_map_gpu_va(process, zerohandle, adapter, &args); -+ if (ret < 0) -+ goto cleanup; -+ /* STATUS_PENING is a success code > 0. 
It is returned to user mode */ -+ if (!(ret == STATUS_PENDING || ret == 0)) { -+ DXG_ERR("Unexpected error %x", ret); -+ goto cleanup; -+ } -+ -+ ret2 = copy_to_user(&input->paging_fence_value, -+ &args.paging_fence_value, sizeof(u64)); -+ if (ret2) { -+ DXG_ERR("failed to copy paging fence to user"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret2 = copy_to_user(&input->virtual_address, &args.virtual_address, -+ sizeof(args.virtual_address)); -+ if (ret2) { -+ DXG_ERR("failed to copy va to user"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_reserve_gpu_va(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dddi_reservegpuvirtualaddress args; -+ struct d3dddi_reservegpuvirtualaddress *input = inargs; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.adapter); -+ if (device == NULL) { -+ DXG_ERR("invalid adapter or paging queue: 0x%x", -+ args.adapter.v); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ kref_get(&adapter->adapter_kref); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } else { -+ args.adapter = adapter->host_handle; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_reserve_gpu_va(process, adapter, &args); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = 
copy_to_user(&input->virtual_address, &args.virtual_address, -+ sizeof(args.virtual_address)); -+ if (ret) { -+ DXG_ERR("failed to copy VA to user"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (adapter) { -+ dxgadapter_release_lock_shared(adapter); -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_free_gpu_va(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_freegpuvirtualaddress args; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ args.adapter = adapter->host_handle; -+ ret = dxgvmb_send_free_gpu_va(process, adapter, &args); -+ -+cleanup: -+ -+ if (adapter) { -+ dxgadapter_release_lock_shared(adapter); -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ } -+ -+ return ret; -+} -+ -+static int -+dxgkio_update_gpu_va(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_updategpuvirtualaddress args; -+ struct d3dkmt_updategpuvirtualaddress *input = inargs; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } 
-+ -+ ret = dxgvmb_send_update_gpu_va(process, adapter, &args); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(&input->fence_value, &args.fence_value, -+ sizeof(args.fence_value)); -+ if (ret) { -+ DXG_ERR("failed to copy fence value to user"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ return ret; -+} -+ - static int - dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - { -@@ -4931,11 +5151,11 @@ static struct ioctl_desc ioctls[] = { - /* 0x05 */ {dxgkio_destroy_context, LX_DXDESTROYCONTEXT}, - /* 0x06 */ {dxgkio_create_allocation, LX_DXCREATEALLOCATION}, - /* 0x07 */ {dxgkio_create_paging_queue, LX_DXCREATEPAGINGQUEUE}, --/* 0x08 */ {}, -+/* 0x08 */ {dxgkio_reserve_gpu_va, LX_DXRESERVEGPUVIRTUALADDRESS}, - /* 0x09 */ {dxgkio_query_adapter_info, LX_DXQUERYADAPTERINFO}, - /* 0x0a */ {dxgkio_query_vidmem_info, LX_DXQUERYVIDEOMEMORYINFO}, - /* 0x0b */ {dxgkio_make_resident, LX_DXMAKERESIDENT}, --/* 0x0c */ {}, -+/* 0x0c */ {dxgkio_map_gpu_va, LX_DXMAPGPUVIRTUALADDRESS}, - /* 0x0d */ {dxgkio_escape, LX_DXESCAPE}, - /* 0x0e */ {dxgkio_get_device_state, LX_DXGETDEVICESTATE}, - /* 0x0f */ {dxgkio_submit_command, LX_DXSUBMITCOMMAND}, -@@ -4956,7 +5176,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x1d */ {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT}, - /* 0x1e */ {dxgkio_evict, LX_DXEVICT}, - /* 0x1f */ {dxgkio_flush_heap_transitions, LX_DXFLUSHHEAPTRANSITIONS}, --/* 0x20 */ {}, -+/* 0x20 */ {dxgkio_free_gpu_va, LX_DXFREEGPUVIRTUALADDRESS}, - /* 0x21 */ {dxgkio_get_context_process_scheduling_priority, - LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY}, - /* 0x22 */ {dxgkio_get_context_scheduling_priority, -@@ -4990,7 +5210,7 @@ static struct ioctl_desc ioctls[] = { - LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE}, - /* 0x37 */ {dxgkio_unlock2, LX_DXUNLOCK2}, - /* 0x38 */ 
{dxgkio_update_alloc_property, LX_DXUPDATEALLOCPROPERTY}, --/* 0x39 */ {}, -+/* 0x39 */ {dxgkio_update_gpu_va, LX_DXUPDATEGPUVIRTUALADDRESS}, - /* 0x3a */ {dxgkio_wait_sync_object_cpu, - LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU}, - /* 0x3b */ {dxgkio_wait_sync_object_gpu, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -1012,6 +1012,124 @@ struct d3dkmt_evict { - __u64 num_bytes_to_trim; - }; - -+struct d3dddigpuva_protection_type { -+ union { -+ struct { -+ __u64 write:1; -+ __u64 execute:1; -+ __u64 zero:1; -+ __u64 no_access:1; -+ __u64 system_use_only:1; -+ __u64 reserved:59; -+ }; -+ __u64 value; -+ }; -+}; -+ -+enum d3dddi_updategpuvirtualaddress_operation_type { -+ _D3DDDI_UPDATEGPUVIRTUALADDRESS_MAP = 0, -+ _D3DDDI_UPDATEGPUVIRTUALADDRESS_UNMAP = 1, -+ _D3DDDI_UPDATEGPUVIRTUALADDRESS_COPY = 2, -+ _D3DDDI_UPDATEGPUVIRTUALADDRESS_MAP_PROTECT = 3, -+}; -+ -+struct d3dddi_updategpuvirtualaddress_operation { -+ enum d3dddi_updategpuvirtualaddress_operation_type operation; -+ union { -+ struct { -+ __u64 base_address; -+ __u64 size; -+ struct d3dkmthandle allocation; -+ __u64 allocation_offset; -+ __u64 allocation_size; -+ } map; -+ struct { -+ __u64 base_address; -+ __u64 size; -+ struct d3dkmthandle allocation; -+ __u64 allocation_offset; -+ __u64 allocation_size; -+ struct d3dddigpuva_protection_type protection; -+ __u64 driver_protection; -+ } map_protect; -+ struct { -+ __u64 base_address; -+ __u64 size; -+ struct d3dddigpuva_protection_type protection; -+ } unmap; -+ struct { -+ __u64 source_address; -+ __u64 size; -+ __u64 dest_address; -+ } copy; -+ }; -+}; -+ -+enum d3dddigpuva_reservation_type { -+ _D3DDDIGPUVA_RESERVE_NO_ACCESS = 0, -+ _D3DDDIGPUVA_RESERVE_ZERO = 1, -+ _D3DDDIGPUVA_RESERVE_NO_COMMIT = 2 -+}; -+ -+struct d3dkmt_updategpuvirtualaddress { -+ struct d3dkmthandle device; -+ struct d3dkmthandle 
context; -+ struct d3dkmthandle fence_object; -+ __u32 num_operations; -+#ifdef __KERNEL__ -+ struct d3dddi_updategpuvirtualaddress_operation *operations; -+#else -+ __u64 operations; -+#endif -+ __u32 reserved0; -+ __u32 reserved1; -+ __u64 reserved2; -+ __u64 fence_value; -+ union { -+ struct { -+ __u32 do_not_wait:1; -+ __u32 reserved:31; -+ }; -+ __u32 value; -+ } flags; -+ __u32 reserved3; -+}; -+ -+struct d3dddi_mapgpuvirtualaddress { -+ struct d3dkmthandle paging_queue; -+ __u64 base_address; -+ __u64 minimum_address; -+ __u64 maximum_address; -+ struct d3dkmthandle allocation; -+ __u64 offset_in_pages; -+ __u64 size_in_pages; -+ struct d3dddigpuva_protection_type protection; -+ __u64 driver_protection; -+ __u32 reserved0; -+ __u64 reserved1; -+ __u64 virtual_address; -+ __u64 paging_fence_value; -+}; -+ -+struct d3dddi_reservegpuvirtualaddress { -+ struct d3dkmthandle adapter; -+ __u64 base_address; -+ __u64 minimum_address; -+ __u64 maximum_address; -+ __u64 size; -+ enum d3dddigpuva_reservation_type reservation_type; -+ __u64 driver_protection; -+ __u64 virtual_address; -+ __u64 paging_fence_value; -+}; -+ -+struct d3dkmt_freegpuvirtualaddress { -+ struct d3dkmthandle adapter; -+ __u32 reserved; -+ __u64 base_address; -+ __u64 size; -+}; -+ - enum d3dkmt_memory_segment_group { - _D3DKMT_MEMORY_SEGMENT_GROUP_LOCAL = 0, - _D3DKMT_MEMORY_SEGMENT_GROUP_NON_LOCAL = 1 -@@ -1453,12 +1571,16 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x06, struct d3dkmt_createallocation) - #define LX_DXCREATEPAGINGQUEUE \ - _IOWR(0x47, 0x07, struct d3dkmt_createpagingqueue) -+#define LX_DXRESERVEGPUVIRTUALADDRESS \ -+ _IOWR(0x47, 0x08, struct d3dddi_reservegpuvirtualaddress) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) - #define LX_DXQUERYVIDEOMEMORYINFO \ - _IOWR(0x47, 0x0a, struct d3dkmt_queryvideomemoryinfo) - #define LX_DXMAKERESIDENT \ - _IOWR(0x47, 0x0b, struct d3dddi_makeresident) -+#define LX_DXMAPGPUVIRTUALADDRESS \ -+ 
_IOWR(0x47, 0x0c, struct d3dddi_mapgpuvirtualaddress) - #define LX_DXESCAPE \ - _IOWR(0x47, 0x0d, struct d3dkmt_escape) - #define LX_DXGETDEVICESTATE \ -@@ -1493,6 +1615,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x1e, struct d3dkmt_evict) - #define LX_DXFLUSHHEAPTRANSITIONS \ - _IOWR(0x47, 0x1f, struct d3dkmt_flushheaptransitions) -+#define LX_DXFREEGPUVIRTUALADDRESS \ -+ _IOWR(0x47, 0x20, struct d3dkmt_freegpuvirtualaddress) - #define LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY \ - _IOWR(0x47, 0x21, struct d3dkmt_getcontextinprocessschedulingpriority) - #define LX_DXGETCONTEXTSCHEDULINGPRIORITY \ -@@ -1529,6 +1653,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x37, struct d3dkmt_unlock2) - #define LX_DXUPDATEALLOCPROPERTY \ - _IOWR(0x47, 0x38, struct d3dddi_updateallocproperty) -+#define LX_DXUPDATEGPUVIRTUALADDRESS \ -+ _IOWR(0x47, 0x39, struct d3dkmt_updategpuvirtualaddress) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x3a, struct d3dkmt_waitforsynchronizationobjectfromcpu) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1696-drivers-hv-dxgkrnl-Add-support-to-map-guest-pages-by-host.patch b/patch/kernel/archive/wsl2-arm64-6.1/1696-drivers-hv-dxgkrnl-Add-support-to-map-guest-pages-by-host.patch deleted file mode 100644 index 767630abc3c9..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1696-drivers-hv-dxgkrnl-Add-support-to-map-guest-pages-by-host.patch +++ /dev/null @@ -1,313 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Fri, 8 Oct 2021 14:17:39 -0700 -Subject: drivers: hv: dxgkrnl: Add support to map guest pages by host - -Implement support for mapping guest memory pages by the host. -This removes hyper-v limitations of using GPADL (guest physical -address list). - -Dxgkrnl uses hyper-v GPADLs to share guest system memory with the -host. 
This method has limitations: -- a single GPADL can represent only ~32MB of memory -- there is a limit of how much memory the total size of GPADLs - in a VM can represent. -To avoid these limitations the host implemented mapping guest memory -pages. Presence of this support is determined by reading PCI config -space. When the support is enabled, dxgkrnl does not use GPADLs and -instead uses the following code flow: -- memory pages of an existing system memory buffer are pinned -- PFNs of the pages are sent to the host via a VM bus message -- the host maps the PFNs to get access to the memory - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/Makefile | 2 +- - drivers/hv/dxgkrnl/dxgkrnl.h | 1 + - drivers/hv/dxgkrnl/dxgmodule.c | 33 ++- - drivers/hv/dxgkrnl/dxgvmbus.c | 117 +++++++--- - drivers/hv/dxgkrnl/dxgvmbus.h | 10 + - drivers/hv/dxgkrnl/misc.c | 1 + - 6 files changed, 129 insertions(+), 35 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/Makefile b/drivers/hv/dxgkrnl/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/Makefile -+++ b/drivers/hv/dxgkrnl/Makefile -@@ -2,4 +2,4 @@ - # Makefile for the hyper-v compute device driver (dxgkrnl). 
- - obj-$(CONFIG_DXGKRNL) += dxgkrnl.o --dxgkrnl-y := dxgmodule.o hmgr.o misc.o dxgadapter.o ioctl.o dxgvmbus.o dxgprocess.o -+dxgkrnl-y := dxgmodule.o hmgr.o misc.o dxgadapter.o ioctl.o dxgvmbus.o dxgprocess.o -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -316,6 +316,7 @@ struct dxgglobal { - bool misc_registered; - bool pci_registered; - bool vmbus_registered; -+ bool map_guest_pages_enabled; - }; - - static inline struct dxgglobal *dxggbl(void) -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -147,7 +147,7 @@ void dxgglobal_remove_host_event(struct dxghostevent *event) - - void signal_host_cpu_event(struct dxghostevent *eventhdr) - { -- struct dxghosteventcpu *event = (struct dxghosteventcpu *)eventhdr; -+ struct dxghosteventcpu *event = (struct dxghosteventcpu *)eventhdr; - - if (event->remove_from_list || - event->destroy_after_signal) { -@@ -426,7 +426,11 @@ const struct file_operations dxgk_fops = { - #define DXGK_VMBUS_VGPU_LUID_OFFSET (DXGK_VMBUS_VERSION_OFFSET + \ - sizeof(u32)) - --/* The guest writes its capabilities to this address */ -+/* The host caps (dxgk_vmbus_hostcaps) */ -+#define DXGK_VMBUS_HOSTCAPS_OFFSET (DXGK_VMBUS_VGPU_LUID_OFFSET + \ -+ sizeof(struct winluid)) -+ -+/* The guest writes its capavilities to this adderss */ - #define DXGK_VMBUS_GUESTCAPS_OFFSET (DXGK_VMBUS_VERSION_OFFSET + \ - sizeof(u32)) - -@@ -441,6 +445,23 @@ struct dxgk_vmbus_guestcaps { - }; - }; - -+/* -+ * The structure defines features, supported by the host. -+ * -+ * map_guest_memory -+ * Host can map guest memory pages, so the guest can avoid using GPADLs -+ * to represent existing system memory allocations. 
-+ */ -+struct dxgk_vmbus_hostcaps { -+ union { -+ struct { -+ u32 map_guest_memory : 1; -+ u32 reserved : 31; -+ }; -+ u32 host_caps; -+ }; -+}; -+ - /* - * A helper function to read PCI config space. - */ -@@ -475,6 +496,7 @@ static int dxg_pci_probe_device(struct pci_dev *dev, - struct winluid vgpu_luid = {}; - struct dxgk_vmbus_guestcaps guest_caps = {.wsl2 = 1}; - struct dxgglobal *dxgglobal = dxggbl(); -+ struct dxgk_vmbus_hostcaps host_caps = {}; - - mutex_lock(&dxgglobal->device_mutex); - -@@ -503,6 +525,13 @@ static int dxg_pci_probe_device(struct pci_dev *dev, - if (ret) - goto cleanup; - -+ ret = pci_read_config_dword(dev, DXGK_VMBUS_HOSTCAPS_OFFSET, -+ &host_caps.host_caps); -+ if (ret == 0) { -+ if (host_caps.map_guest_memory) -+ dxgglobal->map_guest_pages_enabled = true; -+ } -+ - if (dxgglobal->vmbus_ver > DXGK_VMBUS_INTERFACE_VERSION) - dxgglobal->vmbus_ver = DXGK_VMBUS_INTERFACE_VERSION; - } -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1383,15 +1383,19 @@ int create_existing_sysmem(struct dxgdevice *device, - void *kmem = NULL; - int ret = 0; - struct dxgkvmb_command_setexistingsysmemstore *set_store_command; -+ struct dxgkvmb_command_setexistingsysmempages *set_pages_command; - u64 alloc_size = host_alloc->allocation_size; - u32 npages = alloc_size >> PAGE_SHIFT; - struct dxgvmbusmsg msg = {.hdr = NULL}; -- -- ret = init_message(&msg, device->adapter, device->process, -- sizeof(*set_store_command)); -- if (ret) -- goto cleanup; -- set_store_command = (void *)msg.msg; -+ const u32 max_pfns_in_message = -+ (DXG_MAX_VM_BUS_PACKET_SIZE - sizeof(*set_pages_command) - -+ PAGE_SIZE) / sizeof(__u64); -+ u32 alloc_offset_in_pages = 0; -+ struct page **page_in; -+ u64 *pfn; -+ u32 pages_to_send; -+ u32 i; -+ struct dxgglobal *dxgglobal = dxggbl(); - - /* - * Create a guest physical address list and set it as 
the allocation -@@ -1402,6 +1406,7 @@ int create_existing_sysmem(struct dxgdevice *device, - DXG_TRACE("Alloc size: %lld", alloc_size); - - dxgalloc->cpu_address = (void *)sysmem; -+ - dxgalloc->pages = vzalloc(npages * sizeof(void *)); - if (dxgalloc->pages == NULL) { - DXG_ERR("failed to allocate pages"); -@@ -1419,39 +1424,87 @@ int create_existing_sysmem(struct dxgdevice *device, - ret = -ENOMEM; - goto cleanup; - } -- kmem = vmap(dxgalloc->pages, npages, VM_MAP, PAGE_KERNEL); -- if (kmem == NULL) { -- DXG_ERR("vmap failed"); -- ret = -ENOMEM; -- goto cleanup; -- } -- ret1 = vmbus_establish_gpadl(dxgglobal_get_vmbus(), kmem, -- alloc_size, &dxgalloc->gpadl); -- if (ret1) { -- DXG_ERR("establish_gpadl failed: %d", ret1); -- ret = -ENOMEM; -- goto cleanup; -- } -+ if (!dxgglobal->map_guest_pages_enabled) { -+ ret = init_message(&msg, device->adapter, device->process, -+ sizeof(*set_store_command)); -+ if (ret) -+ goto cleanup; -+ set_store_command = (void *)msg.msg; -+ -+ kmem = vmap(dxgalloc->pages, npages, VM_MAP, PAGE_KERNEL); -+ if (kmem == NULL) { -+ DXG_ERR("vmap failed"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret1 = vmbus_establish_gpadl(dxgglobal_get_vmbus(), kmem, -+ alloc_size, &dxgalloc->gpadl); -+ if (ret1) { -+ DXG_ERR("establish_gpadl failed: %d", ret1); -+ ret = -ENOMEM; -+ goto cleanup; -+ } - #ifdef _MAIN_KERNEL_ -- DXG_TRACE("New gpadl %d", dxgalloc->gpadl.gpadl_handle); -+ DXG_TRACE("New gpadl %d", dxgalloc->gpadl.gpadl_handle); - #else -- DXG_TRACE("New gpadl %d", dxgalloc->gpadl); -+ DXG_TRACE("New gpadl %d", dxgalloc->gpadl); - #endif - -- command_vgpu_to_host_init2(&set_store_command->hdr, -- DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE, -- device->process->host_handle); -- set_store_command->device = device->handle; -- set_store_command->device = device->handle; -- set_store_command->allocation = host_alloc->allocation; -+ command_vgpu_to_host_init2(&set_store_command->hdr, -+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE, -+ 
device->process->host_handle); -+ set_store_command->device = device->handle; -+ set_store_command->allocation = host_alloc->allocation; - #ifdef _MAIN_KERNEL_ -- set_store_command->gpadl = dxgalloc->gpadl.gpadl_handle; -+ set_store_command->gpadl = dxgalloc->gpadl.gpadl_handle; - #else -- set_store_command->gpadl = dxgalloc->gpadl; -+ set_store_command->gpadl = dxgalloc->gpadl; - #endif -- ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -- if (ret < 0) -- DXG_ERR("failed to set existing store: %x", ret); -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, -+ msg.size); -+ if (ret < 0) -+ DXG_ERR("failed set existing store: %x", ret); -+ } else { -+ /* -+ * Send the list of the allocation PFNs to the host. The host -+ * will map the pages for GPU access. -+ */ -+ -+ ret = init_message(&msg, device->adapter, device->process, -+ sizeof(*set_pages_command) + -+ max_pfns_in_message * sizeof(u64)); -+ if (ret) -+ goto cleanup; -+ set_pages_command = (void *)msg.msg; -+ command_vgpu_to_host_init2(&set_pages_command->hdr, -+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMPAGES, -+ device->process->host_handle); -+ set_pages_command->device = device->handle; -+ set_pages_command->allocation = host_alloc->allocation; -+ -+ page_in = dxgalloc->pages; -+ while (alloc_offset_in_pages < npages) { -+ pfn = (u64 *)((char *)msg.msg + -+ sizeof(*set_pages_command)); -+ pages_to_send = min(npages - alloc_offset_in_pages, -+ max_pfns_in_message); -+ set_pages_command->num_pages = pages_to_send; -+ set_pages_command->alloc_offset_in_pages = -+ alloc_offset_in_pages; -+ -+ for (i = 0; i < pages_to_send; i++) -+ *pfn++ = page_to_pfn(*page_in++); -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, -+ msg.hdr, -+ msg.size); -+ if (ret < 0) { -+ DXG_ERR("failed set existing pages: %x", ret); -+ break; -+ } -+ alloc_offset_in_pages += pages_to_send; -+ } -+ } - - cleanup: - if (kmem) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 
111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -234,6 +234,16 @@ struct dxgkvmb_command_setexistingsysmemstore { - u32 gpadl; - }; - -+/* Returns ntstatus */ -+struct dxgkvmb_command_setexistingsysmempages { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle allocation; -+ u32 num_pages; -+ u32 alloc_offset_in_pages; -+ /* u64 pfn_array[num_pages] */ -+}; -+ - struct dxgkvmb_command_createprocess { - struct dxgkvmb_command_vm_to_host hdr; - void *process; -diff --git a/drivers/hv/dxgkrnl/misc.c b/drivers/hv/dxgkrnl/misc.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.c -+++ b/drivers/hv/dxgkrnl/misc.c -@@ -35,3 +35,4 @@ u16 *wcsncpy(u16 *dest, const u16 *src, size_t n) - dest[i - 1] = 0; - return dest; - } -+ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1697-drivers-hv-dxgkrnl-Removed-struct-vmbus_gpadl-which-was-defined-in-the-main-linux-branch.patch b/patch/kernel/archive/wsl2-arm64-6.1/1697-drivers-hv-dxgkrnl-Removed-struct-vmbus_gpadl-which-was-defined-in-the-main-linux-branch.patch deleted file mode 100644 index f434a553a124..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1697-drivers-hv-dxgkrnl-Removed-struct-vmbus_gpadl-which-was-defined-in-the-main-linux-branch.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Mon, 21 Mar 2022 20:32:44 -0700 -Subject: drivers: hv: dxgkrnl: Removed struct vmbus_gpadl, which was defined - in the main linux branch - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ 
b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -932,7 +932,7 @@ void dxgallocation_destroy(struct dxgallocation *alloc) - vmbus_teardown_gpadl(dxgglobal_get_vmbus(), &alloc->gpadl); - alloc->gpadl.gpadl_handle = 0; - } --else -+#else - if (alloc->gpadl) { - DXG_TRACE("Teardown gpadl %d", alloc->gpadl); - vmbus_teardown_gpadl(dxgglobal_get_vmbus(), alloc->gpadl); --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1698-drivers-hv-dxgkrnl-Remove-dxgk_init_ioctls.patch b/patch/kernel/archive/wsl2-arm64-6.1/1698-drivers-hv-dxgkrnl-Remove-dxgk_init_ioctls.patch deleted file mode 100644 index a36ee3dedcf0..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1698-drivers-hv-dxgkrnl-Remove-dxgk_init_ioctls.patch +++ /dev/null @@ -1,100 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 22 Mar 2022 10:32:54 -0700 -Subject: drivers: hv: dxgkrnl: Remove dxgk_init_ioctls - -The array of ioctls is initialized statically to remove the unnecessary -function. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgmodule.c | 2 +- - drivers/hv/dxgkrnl/ioctl.c | 15 +++++----- - 2 files changed, 8 insertions(+), 9 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -300,7 +300,7 @@ static void dxgglobal_start_adapters(void) - } - - /* -- * Stopsthe active dxgadapter objects. -+ * Stop the active dxgadapter objects. 
- */ - static void dxgglobal_stop_adapters(void) - { -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -26,7 +26,6 @@ - struct ioctl_desc { - int (*ioctl_callback)(struct dxgprocess *p, void __user *arg); - u32 ioctl; -- u32 arg_size; - }; - - #ifdef DEBUG -@@ -91,7 +90,7 @@ static const struct file_operations dxg_resource_fops = { - }; - - static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, -- void *__user inargs) -+ void *__user inargs) - { - struct d3dkmt_openadapterfromluid args; - int ret; -@@ -1002,7 +1001,7 @@ dxgkio_create_hwqueue(struct dxgprocess *process, void *__user inargs) - } - - static int dxgkio_destroy_hwqueue(struct dxgprocess *process, -- void *__user inargs) -+ void *__user inargs) - { - struct d3dkmt_destroyhwqueue args; - int ret; -@@ -2280,7 +2279,8 @@ dxgkio_submit_command(struct dxgprocess *process, void *__user inargs) - } - - static int --dxgkio_submit_command_to_hwqueue(struct dxgprocess *process, void *__user inargs) -+dxgkio_submit_command_to_hwqueue(struct dxgprocess *process, -+ void *__user inargs) - { - int ret; - struct d3dkmt_submitcommandtohwqueue args; -@@ -5087,8 +5087,7 @@ open_resource(struct dxgprocess *process, - } - - static int --dxgkio_open_resource_nt(struct dxgprocess *process, -- void *__user inargs) -+dxgkio_open_resource_nt(struct dxgprocess *process, void *__user inargs) - { - struct d3dkmt_openresourcefromnthandle args; - struct d3dkmt_openresourcefromnthandle *__user args_user = inargs; -@@ -5166,7 +5165,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x14 */ {dxgkio_enum_adapters, LX_DXENUMADAPTERS2}, - /* 0x15 */ {dxgkio_close_adapter, LX_DXCLOSEADAPTER}, - /* 0x16 */ {dxgkio_change_vidmem_reservation, -- LX_DXCHANGEVIDEOMEMORYRESERVATION}, -+ LX_DXCHANGEVIDEOMEMORYRESERVATION}, - /* 0x17 */ {}, - /* 0x18 */ {dxgkio_create_hwqueue, LX_DXCREATEHWQUEUE}, - /* 0x19 
*/ {dxgkio_destroy_device, LX_DXDESTROYDEVICE}, -@@ -5205,7 +5204,7 @@ static struct ioctl_desc ioctls[] = { - LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2}, - /* 0x34 */ {dxgkio_submit_command_to_hwqueue, LX_DXSUBMITCOMMANDTOHWQUEUE}, - /* 0x35 */ {dxgkio_submit_signal_to_hwqueue, -- LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE}, -+ LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE}, - /* 0x36 */ {dxgkio_submit_wait_to_hwqueue, - LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE}, - /* 0x37 */ {dxgkio_unlock2, LX_DXUNLOCK2}, --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1699-drivers-hv-dxgkrnl-Creation-of-dxgsyncfile-objects.patch b/patch/kernel/archive/wsl2-arm64-6.1/1699-drivers-hv-dxgkrnl-Creation-of-dxgsyncfile-objects.patch deleted file mode 100644 index 44e9b6778efd..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1699-drivers-hv-dxgkrnl-Creation-of-dxgsyncfile-objects.patch +++ /dev/null @@ -1,482 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 22 Mar 2022 11:02:49 -0700 -Subject: drivers: hv: dxgkrnl: Creation of dxgsyncfile objects - -Implement the ioctl to create a dxgsyncfile object -(LX_DXCREATESYNCFILE). This object is a wrapper around a monitored -fence sync object and a fence value. - -dxgsyncfile is built on top of the Linux sync_file object and -provides a way for the user mode to synchronize with the execution -of the device DMA packets. - -The ioctl creates a dxgsyncfile object for the given GPU synchronization -object and a fence value. A file descriptor of the sync_file object -is returned to the caller. The caller could wait for the object by using -poll(). When the underlying GPU synchronization object is signaled on -the host, the host sends a message to the virtual machine and the -sync_file object is signaled. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/Kconfig | 2 + - drivers/hv/dxgkrnl/Makefile | 2 +- - drivers/hv/dxgkrnl/dxgkrnl.h | 2 + - drivers/hv/dxgkrnl/dxgmodule.c | 12 + - drivers/hv/dxgkrnl/dxgsyncfile.c | 215 ++++++++++ - drivers/hv/dxgkrnl/dxgsyncfile.h | 30 ++ - drivers/hv/dxgkrnl/dxgvmbus.c | 33 +- - drivers/hv/dxgkrnl/ioctl.c | 5 +- - include/uapi/misc/d3dkmthk.h | 9 + - 9 files changed, 294 insertions(+), 16 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/Kconfig b/drivers/hv/dxgkrnl/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/Kconfig -+++ b/drivers/hv/dxgkrnl/Kconfig -@@ -6,6 +6,8 @@ config DXGKRNL - tristate "Microsoft Paravirtualized GPU support" - depends on HYPERV - depends on 64BIT || COMPILE_TEST -+ select DMA_SHARED_BUFFER -+ select SYNC_FILE - help - This driver supports paravirtualized virtual compute devices, exposed - by Microsoft Hyper-V when Linux is running inside of a virtual machine -diff --git a/drivers/hv/dxgkrnl/Makefile b/drivers/hv/dxgkrnl/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/Makefile -+++ b/drivers/hv/dxgkrnl/Makefile -@@ -2,4 +2,4 @@ - # Makefile for the hyper-v compute device driver (dxgkrnl). 
- - obj-$(CONFIG_DXGKRNL) += dxgkrnl.o --dxgkrnl-y := dxgmodule.o hmgr.o misc.o dxgadapter.o ioctl.o dxgvmbus.o dxgprocess.o -+dxgkrnl-y := dxgmodule.o hmgr.o misc.o dxgadapter.o ioctl.o dxgvmbus.o dxgprocess.o dxgsyncfile.o -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -120,6 +120,7 @@ struct dxgpagingqueue { - */ - enum dxghosteventtype { - dxghostevent_cpu_event = 1, -+ dxghostevent_dma_fence = 2, - }; - - struct dxghostevent { -@@ -858,6 +859,7 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - struct - d3dkmt_waitforsynchronizationobjectfromcpu - *args, -+ bool user_address, - u64 cpu_event); - int dxgvmb_send_lock2(struct dxgprocess *process, - struct dxgadapter *adapter, -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -16,6 +16,7 @@ - #include - #include - #include "dxgkrnl.h" -+#include "dxgsyncfile.h" - - #define PCI_VENDOR_ID_MICROSOFT 0x1414 - #define PCI_DEVICE_ID_VIRTUAL_RENDER 0x008E -@@ -145,6 +146,15 @@ void dxgglobal_remove_host_event(struct dxghostevent *event) - spin_unlock_irq(&dxgglobal->host_event_list_mutex); - } - -+static void signal_dma_fence(struct dxghostevent *eventhdr) -+{ -+ struct dxgsyncpoint *event = (struct dxgsyncpoint *)eventhdr; -+ -+ event->fence_value++; -+ list_del(&eventhdr->host_event_list_entry); -+ dma_fence_signal(&event->base); -+} -+ - void signal_host_cpu_event(struct dxghostevent *eventhdr) - { - struct dxghosteventcpu *event = (struct dxghosteventcpu *)eventhdr; -@@ -184,6 +194,8 @@ void dxgglobal_signal_host_event(u64 event_id) - DXG_TRACE("found event to signal"); - if (event->event_type == dxghostevent_cpu_event) - signal_host_cpu_event(event); -+ else if (event->event_type == dxghostevent_dma_fence) -+ 
signal_dma_fence(event); - else - DXG_ERR("Unknown host event type"); - break; -diff --git a/drivers/hv/dxgkrnl/dxgsyncfile.c b/drivers/hv/dxgkrnl/dxgsyncfile.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgsyncfile.c -@@ -0,0 +1,215 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Ioctl implementation -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "dxgkrnl.h" -+#include "dxgvmbus.h" -+#include "dxgsyncfile.h" -+ -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt -+ -+#ifdef DEBUG -+static char *errorstr(int ret) -+{ -+ return ret < 0 ? "err" : ""; -+} -+#endif -+ -+static const struct dma_fence_ops dxgdmafence_ops; -+ -+static struct dxgsyncpoint *to_syncpoint(struct dma_fence *fence) -+{ -+ if (fence->ops != &dxgdmafence_ops) -+ return NULL; -+ return container_of(fence, struct dxgsyncpoint, base); -+} -+ -+int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_createsyncfile args; -+ struct dxgsyncpoint *pt = NULL; -+ int ret = 0; -+ int fd = get_unused_fd_flags(O_CLOEXEC); -+ struct sync_file *sync_file = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dkmt_waitforsynchronizationobjectfromcpu waitargs = {}; -+ -+ if (fd < 0) { -+ DXG_ERR("get_unused_fd_flags failed: %d", fd); -+ ret = fd; -+ goto cleanup; -+ } -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EFAULT; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ DXG_ERR("dxgprocess_device_by_handle failed"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ DXG_ERR("dxgdevice_acquire_lock_shared failed"); -+ 
device = NULL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ DXG_ERR("dxgadapter_acquire_lock_shared failed"); -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ pt = kzalloc(sizeof(*pt), GFP_KERNEL); -+ if (!pt) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ spin_lock_init(&pt->lock); -+ pt->fence_value = args.fence_value; -+ pt->context = dma_fence_context_alloc(1); -+ pt->hdr.event_id = dxgglobal_new_host_event_id(); -+ pt->hdr.event_type = dxghostevent_dma_fence; -+ dxgglobal_add_host_event(&pt->hdr); -+ -+ dma_fence_init(&pt->base, &dxgdmafence_ops, &pt->lock, -+ pt->context, args.fence_value); -+ -+ sync_file = sync_file_create(&pt->base); -+ if (sync_file == NULL) { -+ DXG_ERR("sync_file_create failed"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ dma_fence_put(&pt->base); -+ -+ waitargs.device = args.device; -+ waitargs.object_count = 1; -+ waitargs.objects = &args.monitored_fence; -+ waitargs.fence_values = &args.fence_value; -+ ret = dxgvmb_send_wait_sync_object_cpu(process, adapter, -+ &waitargs, false, -+ pt->hdr.event_id); -+ if (ret < 0) { -+ DXG_ERR("dxgvmb_send_wait_sync_object_cpu failed"); -+ goto cleanup; -+ } -+ -+ args.sync_file_handle = (u64)fd; -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EFAULT; -+ goto cleanup; -+ } -+ -+ fd_install(fd, sync_file->file); -+ -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ dxgdevice_release_lock_shared(device); -+ if (ret) { -+ if (sync_file) { -+ fput(sync_file->file); -+ /* sync_file_release will destroy dma_fence */ -+ pt = NULL; -+ } -+ if (pt) -+ dma_fence_put(&pt->base); -+ if (fd >= 0) -+ put_unused_fd(fd); -+ } -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static const char *dxgdmafence_get_driver_name(struct dma_fence *fence) -+{ -+ return "dxgkrnl"; -+} -+ -+static const char 
*dxgdmafence_get_timeline_name(struct dma_fence *fence) -+{ -+ return "no_timeline"; -+} -+ -+static void dxgdmafence_release(struct dma_fence *fence) -+{ -+ struct dxgsyncpoint *syncpoint; -+ -+ syncpoint = to_syncpoint(fence); -+ if (syncpoint) { -+ if (syncpoint->hdr.event_id) -+ dxgglobal_get_host_event(syncpoint->hdr.event_id); -+ kfree(syncpoint); -+ } -+} -+ -+static bool dxgdmafence_signaled(struct dma_fence *fence) -+{ -+ struct dxgsyncpoint *syncpoint; -+ -+ syncpoint = to_syncpoint(fence); -+ if (syncpoint == 0) -+ return true; -+ return __dma_fence_is_later(syncpoint->fence_value, fence->seqno, -+ fence->ops); -+} -+ -+static bool dxgdmafence_enable_signaling(struct dma_fence *fence) -+{ -+ return true; -+} -+ -+static void dxgdmafence_value_str(struct dma_fence *fence, -+ char *str, int size) -+{ -+ snprintf(str, size, "%lld", fence->seqno); -+} -+ -+static void dxgdmafence_timeline_value_str(struct dma_fence *fence, -+ char *str, int size) -+{ -+ struct dxgsyncpoint *syncpoint; -+ -+ syncpoint = to_syncpoint(fence); -+ snprintf(str, size, "%lld", syncpoint->fence_value); -+} -+ -+static const struct dma_fence_ops dxgdmafence_ops = { -+ .get_driver_name = dxgdmafence_get_driver_name, -+ .get_timeline_name = dxgdmafence_get_timeline_name, -+ .enable_signaling = dxgdmafence_enable_signaling, -+ .signaled = dxgdmafence_signaled, -+ .release = dxgdmafence_release, -+ .fence_value_str = dxgdmafence_value_str, -+ .timeline_value_str = dxgdmafence_timeline_value_str, -+}; -diff --git a/drivers/hv/dxgkrnl/dxgsyncfile.h b/drivers/hv/dxgkrnl/dxgsyncfile.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgsyncfile.h -@@ -0,0 +1,30 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. 
-+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Headers for sync file objects -+ * -+ */ -+ -+#ifndef _DXGSYNCFILE_H -+#define _DXGSYNCFILE_H -+ -+#include -+ -+int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs); -+ -+struct dxgsyncpoint { -+ struct dxghostevent hdr; -+ struct dma_fence base; -+ u64 fence_value; -+ u64 context; -+ spinlock_t lock; -+ u64 u64; -+}; -+ -+#endif /* _DXGSYNCFILE_H */ -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -2820,6 +2820,7 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - struct - d3dkmt_waitforsynchronizationobjectfromcpu - *args, -+ bool user_address, - u64 cpu_event) - { - int ret = -EINVAL; -@@ -2844,19 +2845,25 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - command->guest_event_pointer = (u64) cpu_event; - current_pos = (u8 *) &command[1]; - -- ret = copy_from_user(current_pos, args->objects, object_size); -- if (ret) { -- DXG_ERR("failed to copy objects"); -- ret = -EINVAL; -- goto cleanup; -- } -- current_pos += object_size; -- ret = copy_from_user(current_pos, args->fence_values, -- fence_size); -- if (ret) { -- DXG_ERR("failed to copy fences"); -- ret = -EINVAL; -- goto cleanup; -+ if (user_address) { -+ ret = copy_from_user(current_pos, args->objects, object_size); -+ if (ret) { -+ DXG_ERR("failed to copy objects"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ current_pos += object_size; -+ ret = copy_from_user(current_pos, args->fence_values, -+ fence_size); -+ if (ret) { -+ DXG_ERR("failed to copy fences"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } else { -+ memcpy(current_pos, args->objects, object_size); -+ current_pos += object_size; -+ memcpy(current_pos, args->fence_values, fence_size); - } - - ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -diff 
--git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -19,6 +19,7 @@ - - #include "dxgkrnl.h" - #include "dxgvmbus.h" -+#include "dxgsyncfile.h" - - #undef pr_fmt - #define pr_fmt(fmt) "dxgk: " fmt -@@ -3488,7 +3489,7 @@ dxgkio_wait_sync_object_cpu(struct dxgprocess *process, void *__user inargs) - } - - ret = dxgvmb_send_wait_sync_object_cpu(process, adapter, -- &args, event_id); -+ &args, true, event_id); - if (ret < 0) - goto cleanup; - -@@ -5224,7 +5225,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x42 */ {dxgkio_open_resource_nt, LX_DXOPENRESOURCEFROMNTHANDLE}, - /* 0x43 */ {dxgkio_query_statistics, LX_DXQUERYSTATISTICS}, - /* 0x44 */ {dxgkio_share_object_with_host, LX_DXSHAREOBJECTWITHHOST}, --/* 0x45 */ {}, -+/* 0x45 */ {dxgkio_create_sync_file, LX_DXCREATESYNCFILE}, - }; - - /* -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -1554,6 +1554,13 @@ struct d3dkmt_shareobjectwithhost { - __u64 object_vail_nt_handle; - }; - -+struct d3dkmt_createsyncfile { -+ struct d3dkmthandle device; -+ struct d3dkmthandle monitored_fence; -+ __u64 fence_value; -+ __u64 sync_file_handle; /* out */ -+}; -+ - /* - * Dxgkrnl Graphics Port Driver ioctl definitions - * -@@ -1677,5 +1684,7 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x43, struct d3dkmt_querystatistics) - #define LX_DXSHAREOBJECTWITHHOST \ - _IOWR(0x47, 0x44, struct d3dkmt_shareobjectwithhost) -+#define LX_DXCREATESYNCFILE \ -+ _IOWR(0x47, 0x45, struct d3dkmt_createsyncfile) - - #endif /* _D3DKMTHK_H */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1700-drivers-hv-dxgkrnl-Use-tracing-instead-of-dev_dbg.patch b/patch/kernel/archive/wsl2-arm64-6.1/1700-drivers-hv-dxgkrnl-Use-tracing-instead-of-dev_dbg.patch deleted file mode 
100644 index 3a99408a496b..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1700-drivers-hv-dxgkrnl-Use-tracing-instead-of-dev_dbg.patch +++ /dev/null @@ -1,205 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Thu, 24 Mar 2022 15:03:41 -0700 -Subject: drivers: hv: dxgkrnl: Use tracing instead of dev_dbg - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 4 +-- - drivers/hv/dxgkrnl/dxgmodule.c | 5 ++- - drivers/hv/dxgkrnl/dxgprocess.c | 6 ++-- - drivers/hv/dxgkrnl/dxgvmbus.c | 4 +-- - drivers/hv/dxgkrnl/hmgr.c | 16 +++++----- - drivers/hv/dxgkrnl/ioctl.c | 8 ++--- - drivers/hv/dxgkrnl/misc.c | 4 +-- - 7 files changed, 25 insertions(+), 22 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -18,8 +18,8 @@ - - #include "dxgkrnl.h" - --#undef pr_fmt --#define pr_fmt(fmt) "dxgk: " fmt -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt - - int dxgadapter_set_vmbus(struct dxgadapter *adapter, struct hv_device *hdev) - { -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -24,6 +24,9 @@ - #undef pr_fmt - #define pr_fmt(fmt) "dxgk: " fmt - -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt -+ - /* - * Interface from dxgglobal - */ -@@ -442,7 +445,7 @@ const struct file_operations dxgk_fops = { - #define DXGK_VMBUS_HOSTCAPS_OFFSET (DXGK_VMBUS_VGPU_LUID_OFFSET + \ - sizeof(struct winluid)) - --/* The guest writes its capavilities to this adderss */ -+/* The guest writes its capabilities to this address */ - #define DXGK_VMBUS_GUESTCAPS_OFFSET (DXGK_VMBUS_VERSION_OFFSET + \ - sizeof(u32)) - -diff --git 
a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -13,8 +13,8 @@ - - #include "dxgkrnl.h" - --#undef pr_fmt --#define pr_fmt(fmt) "dxgk: " fmt -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt - - /* - * Creates a new dxgprocess object -@@ -248,7 +248,7 @@ struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process, - HMGRENTRY_TYPE_DXGADAPTER, - handle); - if (adapter == NULL) -- DXG_ERR("adapter_by_handle failed %x", handle.v); -+ DXG_TRACE("adapter_by_handle failed %x", handle.v); - else if (kref_get_unless_zero(&adapter->adapter_kref) == 0) { - DXG_ERR("failed to acquire adapter reference"); - adapter = NULL; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -22,8 +22,8 @@ - #include "dxgkrnl.h" - #include "dxgvmbus.h" - --#undef pr_fmt --#define pr_fmt(fmt) "dxgk: " fmt -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt - - #define RING_BUFSIZE (256 * 1024) - -diff --git a/drivers/hv/dxgkrnl/hmgr.c b/drivers/hv/dxgkrnl/hmgr.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/hmgr.c -+++ b/drivers/hv/dxgkrnl/hmgr.c -@@ -19,8 +19,8 @@ - #include "dxgkrnl.h" - #include "hmgr.h" - --#undef pr_fmt --#define pr_fmt(fmt) "dxgk: " fmt -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt - - const struct d3dkmthandle zerohandle; - -@@ -90,29 +90,29 @@ static bool is_handle_valid(struct hmgrtable *table, struct d3dkmthandle h, - struct hmgrentry *entry; - - if (index >= table->table_size) { -- DXG_ERR("Invalid index %x %d", h.v, index); -+ DXG_TRACE("Invalid index %x %d", h.v, index); - return false; - } - - entry = &table->entry_table[index]; - if (unique != entry->unique) { -- DXG_ERR("Invalid unique %x %d %d %d %p", -+ DXG_TRACE("Invalid unique %x %d 
%d %d %p", - h.v, unique, entry->unique, index, entry->object); - return false; - } - - if (entry->destroyed && !ignore_destroyed) { -- DXG_ERR("Invalid destroyed value"); -+ DXG_TRACE("Invalid destroyed value"); - return false; - } - - if (entry->type == HMGRENTRY_TYPE_FREE) { -- DXG_ERR("Entry is freed %x %d", h.v, index); -+ DXG_TRACE("Entry is freed %x %d", h.v, index); - return false; - } - - if (t != HMGRENTRY_TYPE_FREE && t != entry->type) { -- DXG_ERR("type mismatch %x %d %d", h.v, t, entry->type); -+ DXG_TRACE("type mismatch %x %d %d", h.v, t, entry->type); - return false; - } - -@@ -500,7 +500,7 @@ void *hmgrtable_get_object_by_type(struct hmgrtable *table, - struct d3dkmthandle h) - { - if (!is_handle_valid(table, h, false, type)) { -- DXG_ERR("Invalid handle %x", h.v); -+ DXG_TRACE("Invalid handle %x", h.v); - return NULL; - } - return table->entry_table[get_index(h)].object; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -21,8 +21,8 @@ - #include "dxgvmbus.h" - #include "dxgsyncfile.h" - --#undef pr_fmt --#define pr_fmt(fmt) "dxgk: " fmt -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt - - struct ioctl_desc { - int (*ioctl_callback)(struct dxgprocess *p, void __user *arg); -@@ -556,7 +556,7 @@ dxgkio_enum_adapters3(struct dxgprocess *process, void *__user inargs) - - cleanup: - -- DXG_TRACE("ioctl: %s %d", errorstr(ret), ret); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); - return ret; - } - -@@ -5242,7 +5242,7 @@ static int dxgk_ioctl(struct file *f, unsigned int p1, unsigned long p2) - int status; - struct dxgprocess *process; - -- if (code < 1 || code >= ARRAY_SIZE(ioctls)) { -+ if (code < 1 || code >= ARRAY_SIZE(ioctls)) { - DXG_ERR("bad ioctl %x %x %x %x", - code, _IOC_TYPE(p1), _IOC_SIZE(p1), _IOC_DIR(p1)); - return -ENOTTY; -diff --git a/drivers/hv/dxgkrnl/misc.c b/drivers/hv/dxgkrnl/misc.c -index 
111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.c -+++ b/drivers/hv/dxgkrnl/misc.c -@@ -18,8 +18,8 @@ - #include "dxgkrnl.h" - #include "misc.h" - --#undef pr_fmt --#define pr_fmt(fmt) "dxgk: " fmt -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt - - u16 *wcsncpy(u16 *dest, const u16 *src, size_t n) - { --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1701-drivers-hv-dxgkrnl-Implement-D3DKMTWaitSyncFile.patch b/patch/kernel/archive/wsl2-arm64-6.1/1701-drivers-hv-dxgkrnl-Implement-D3DKMTWaitSyncFile.patch deleted file mode 100644 index d2c43649bad4..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1701-drivers-hv-dxgkrnl-Implement-D3DKMTWaitSyncFile.patch +++ /dev/null @@ -1,658 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Mon, 2 May 2022 11:46:48 -0700 -Subject: drivers: hv: dxgkrnl: Implement D3DKMTWaitSyncFile - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 11 + - drivers/hv/dxgkrnl/dxgmodule.c | 7 +- - drivers/hv/dxgkrnl/dxgprocess.c | 12 +- - drivers/hv/dxgkrnl/dxgsyncfile.c | 291 +++++++++- - drivers/hv/dxgkrnl/dxgsyncfile.h | 3 + - drivers/hv/dxgkrnl/dxgvmbus.c | 49 ++ - drivers/hv/dxgkrnl/ioctl.c | 16 +- - include/uapi/misc/d3dkmthk.h | 23 + - 8 files changed, 396 insertions(+), 16 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -254,6 +254,10 @@ void dxgsharedsyncobj_add_syncobj(struct dxgsharedsyncobject *sharedsyncobj, - struct dxgsyncobject *syncobj); - void dxgsharedsyncobj_remove_syncobj(struct dxgsharedsyncobject *sharedsyncobj, - struct dxgsyncobject *syncobj); -+int dxgsharedsyncobj_get_host_nt_handle(struct dxgsharedsyncobject *syncobj, -+ struct dxgprocess *process, -+ struct d3dkmthandle objecthandle); 
-+void dxgsharedsyncobj_put(struct dxgsharedsyncobject *syncobj); - - struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, - struct dxgdevice *device, -@@ -384,6 +388,8 @@ struct dxgprocess { - pid_t tgid; - /* how many time the process was opened */ - struct kref process_kref; -+ /* protects the object memory */ -+ struct kref process_mem_kref; - /* - * This handle table is used for all objects except dxgadapter - * The handle table lock order is higher than the local_handle_table -@@ -405,6 +411,7 @@ struct dxgprocess { - struct dxgprocess *dxgprocess_create(void); - void dxgprocess_destroy(struct dxgprocess *process); - void dxgprocess_release(struct kref *refcount); -+void dxgprocess_mem_release(struct kref *refcount); - int dxgprocess_open_adapter(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmthandle *handle); -@@ -932,6 +939,10 @@ int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, - struct d3dkmt_opensyncobjectfromnthandle2 - *args, - struct dxgsyncobject *syncobj); -+int dxgvmb_send_open_sync_object(struct dxgprocess *process, -+ struct d3dkmthandle device, -+ struct d3dkmthandle host_shared_syncobj, -+ struct d3dkmthandle *syncobj); - int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryallocationresidency -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -149,10 +149,11 @@ void dxgglobal_remove_host_event(struct dxghostevent *event) - spin_unlock_irq(&dxgglobal->host_event_list_mutex); - } - --static void signal_dma_fence(struct dxghostevent *eventhdr) -+static void dxg_signal_dma_fence(struct dxghostevent *eventhdr) - { - struct dxgsyncpoint *event = (struct dxgsyncpoint *)eventhdr; - -+ DXG_TRACE("syncpoint: %px, fence: %lld", event, event->fence_value); - event->fence_value++; - 
list_del(&eventhdr->host_event_list_entry); - dma_fence_signal(&event->base); -@@ -198,7 +199,7 @@ void dxgglobal_signal_host_event(u64 event_id) - if (event->event_type == dxghostevent_cpu_event) - signal_host_cpu_event(event); - else if (event->event_type == dxghostevent_dma_fence) -- signal_dma_fence(event); -+ dxg_signal_dma_fence(event); - else - DXG_ERR("Unknown host event type"); - break; -@@ -355,6 +356,7 @@ static struct dxgprocess *dxgglobal_get_current_process(void) - if (entry->tgid == current->tgid) { - if (kref_get_unless_zero(&entry->process_kref)) { - process = entry; -+ kref_get(&entry->process_mem_kref); - DXG_TRACE("found dxgprocess"); - } else { - DXG_TRACE("process is destroyed"); -@@ -405,6 +407,7 @@ static int dxgk_release(struct inode *n, struct file *f) - return -EINVAL; - - kref_put(&process->process_kref, dxgprocess_release); -+ kref_put(&process->process_mem_kref, dxgprocess_mem_release); - - f->private_data = NULL; - return 0; -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -39,6 +39,7 @@ struct dxgprocess *dxgprocess_create(void) - } else { - INIT_LIST_HEAD(&process->plistentry); - kref_init(&process->process_kref); -+ kref_init(&process->process_mem_kref); - - mutex_lock(&dxgglobal->plistmutex); - list_add_tail(&process->plistentry, -@@ -117,8 +118,17 @@ void dxgprocess_release(struct kref *refcount) - - dxgprocess_destroy(process); - -- if (process->host_handle.v) -+ if (process->host_handle.v) { - dxgvmb_send_destroy_process(process->host_handle); -+ process->host_handle.v = 0; -+ } -+} -+ -+void dxgprocess_mem_release(struct kref *refcount) -+{ -+ struct dxgprocess *process; -+ -+ process = container_of(refcount, struct dxgprocess, process_mem_kref); - kfree(process); - } - -diff --git a/drivers/hv/dxgkrnl/dxgsyncfile.c b/drivers/hv/dxgkrnl/dxgsyncfile.c -index 
111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgsyncfile.c -+++ b/drivers/hv/dxgkrnl/dxgsyncfile.c -@@ -9,6 +9,20 @@ - * Dxgkrnl Graphics Driver - * Ioctl implementation - * -+ * dxgsyncpoint: -+ * - pointer to dxgsharedsyncobject -+ * - host_shared_handle_nt_reference incremented -+ * - list of (process, local syncobj d3dkmthandle) pairs -+ * wait for sync file -+ * - get dxgsyncpoint -+ * - if process doesn't have a local syncobj -+ * - create local dxgsyncobject -+ * - send open syncobj to the host -+ * - Send wait for syncobj to the context -+ * dxgsyncpoint destruction -+ * - walk the list of (process, local syncobj) -+ * - destroy syncobj -+ * - remove reference to dxgsharedsyncobject - */ - - #include -@@ -45,12 +59,15 @@ int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) - struct d3dkmt_createsyncfile args; - struct dxgsyncpoint *pt = NULL; - int ret = 0; -- int fd = get_unused_fd_flags(O_CLOEXEC); -+ int fd; - struct sync_file *sync_file = NULL; - struct dxgdevice *device = NULL; - struct dxgadapter *adapter = NULL; -+ struct dxgsyncobject *syncobj = NULL; - struct d3dkmt_waitforsynchronizationobjectfromcpu waitargs = {}; -+ bool device_lock_acquired = false; - -+ fd = get_unused_fd_flags(O_CLOEXEC); - if (fd < 0) { - DXG_ERR("get_unused_fd_flags failed: %d", fd); - ret = fd; -@@ -74,9 +91,9 @@ int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) - ret = dxgdevice_acquire_lock_shared(device); - if (ret < 0) { - DXG_ERR("dxgdevice_acquire_lock_shared failed"); -- device = NULL; - goto cleanup; - } -+ device_lock_acquired = true; - - adapter = device->adapter; - ret = dxgadapter_acquire_lock_shared(adapter); -@@ -109,6 +126,30 @@ int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) - } - dma_fence_put(&pt->base); - -+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -+ syncobj = hmgrtable_get_object(&process->handle_table, -+ args.monitored_fence); -+ if (syncobj 
== NULL) { -+ DXG_ERR("invalid syncobj handle %x", args.monitored_fence.v); -+ ret = -EINVAL; -+ } else { -+ if (syncobj->shared) { -+ kref_get(&syncobj->syncobj_kref); -+ pt->shared_syncobj = syncobj->shared_owner; -+ } -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED); -+ -+ if (pt->shared_syncobj) { -+ ret = dxgsharedsyncobj_get_host_nt_handle(pt->shared_syncobj, -+ process, -+ args.monitored_fence); -+ if (ret) -+ pt->shared_syncobj = NULL; -+ } -+ if (ret) -+ goto cleanup; -+ - waitargs.device = args.device; - waitargs.object_count = 1; - waitargs.objects = &args.monitored_fence; -@@ -132,10 +173,15 @@ int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) - fd_install(fd, sync_file->file); - - cleanup: -+ if (syncobj && syncobj->shared) -+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release); - if (adapter) - dxgadapter_release_lock_shared(adapter); -- if (device) -- dxgdevice_release_lock_shared(device); -+ if (device) { -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } - if (ret) { - if (sync_file) { - fput(sync_file->file); -@@ -151,6 +197,228 @@ int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+int dxgkio_open_syncobj_from_syncfile(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_opensyncobjectfromsyncfile args; -+ int ret = 0; -+ struct dxgsyncpoint *pt = NULL; -+ struct dma_fence *dmafence = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct dxgsyncobject *syncobj = NULL; -+ struct d3dddi_synchronizationobject_flags flags = { }; -+ struct d3dkmt_opensyncobjectfromnthandle2 openargs = { }; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EFAULT; -+ goto cleanup; -+ } -+ -+ dmafence = 
sync_file_get_fence(args.sync_file_handle); -+ if (dmafence == NULL) { -+ DXG_ERR("failed to get dmafence from handle: %llx", -+ args.sync_file_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ pt = to_syncpoint(dmafence); -+ if (pt->shared_syncobj == NULL) { -+ DXG_ERR("Sync object is not shared"); -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ DXG_ERR("dxgprocess_device_by_handle failed"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ DXG_ERR("dxgdevice_acquire_lock_shared failed"); -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ DXG_ERR("dxgadapter_acquire_lock_shared failed"); -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ flags.shared = 1; -+ flags.nt_security_sharing = 1; -+ syncobj = dxgsyncobject_create(process, device, adapter, -+ _D3DDDI_MONITORED_FENCE, flags); -+ if (syncobj == NULL) { -+ DXG_ERR("failed to create sync object"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ dxgsharedsyncobj_add_syncobj(pt->shared_syncobj, syncobj); -+ -+ /* Open the shared syncobj to get a local handle */ -+ -+ openargs.device = device->handle; -+ openargs.flags.shared = 1; -+ openargs.flags.nt_security_sharing = 1; -+ openargs.flags.no_signal = 1; -+ -+ ret = dxgvmb_send_open_sync_object_nt(process, -+ &dxgglobal->channel, &openargs, syncobj); -+ if (ret) { -+ DXG_ERR("Failed to open shared syncobj on host"); -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, -+ syncobj, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ openargs.sync_object); -+ if (ret == 0) { -+ syncobj->handle = openargs.sync_object; -+ kref_get(&syncobj->syncobj_kref); -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ 
args.syncobj = openargs.sync_object; -+ args.fence_value = pt->fence_value; -+ args.fence_value_cpu_va = openargs.monitored_fence.fence_value_cpu_va; -+ args.fence_value_gpu_va = openargs.monitored_fence.fence_value_gpu_va; -+ -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EFAULT; -+ } -+ -+cleanup: -+ if (dmafence) -+ dma_fence_put(dmafence); -+ if (ret) { -+ if (syncobj) { -+ dxgsyncobject_destroy(process, syncobj); -+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release); -+ } -+ } -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) { -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+int dxgkio_wait_sync_file(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_waitsyncfile args; -+ struct dma_fence *dmafence = NULL; -+ int ret = 0; -+ struct dxgsyncpoint *pt = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dkmthandle syncobj_handle = {}; -+ bool device_lock_acquired = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EFAULT; -+ goto cleanup; -+ } -+ -+ dmafence = sync_file_get_fence(args.sync_file_handle); -+ if (dmafence == NULL) { -+ DXG_ERR("failed to get dmafence from handle: %llx", -+ args.sync_file_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ pt = to_syncpoint(dmafence); -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ DXG_ERR("dxgdevice_acquire_lock_shared failed"); -+ device = NULL; -+ goto cleanup; -+ } -+ device_lock_acquired = true; -+ -+ adapter = device->adapter; -+ ret = 
dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ DXG_ERR("dxgadapter_acquire_lock_shared failed"); -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ /* Open the shared syncobj to get a local handle */ -+ if (pt->shared_syncobj == NULL) { -+ DXG_ERR("Sync object is not shared"); -+ goto cleanup; -+ } -+ ret = dxgvmb_send_open_sync_object(process, -+ device->handle, -+ pt->shared_syncobj->host_shared_handle, -+ &syncobj_handle); -+ if (ret) { -+ DXG_ERR("Failed to open shared syncobj on host"); -+ goto cleanup; -+ } -+ -+ /* Ask the host to insert the syncobj to the context queue */ -+ ret = dxgvmb_send_wait_sync_object_gpu(process, adapter, -+ args.context, 1, -+ &syncobj_handle, -+ &pt->fence_value, -+ false); -+ if (ret < 0) { -+ DXG_ERR("dxgvmb_send_wait_sync_object_cpu failed"); -+ goto cleanup; -+ } -+ -+ /* -+ * Destroy the local syncobject immediately. This will not unblock -+ * GPU waiters, but will unblock CPU waiter, which includes the sync -+ * file itself. -+ */ -+ ret = dxgvmb_send_destroy_sync_object(process, syncobj_handle); -+ -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) { -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ if (dmafence) -+ dma_fence_put(dmafence); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static const char *dxgdmafence_get_driver_name(struct dma_fence *fence) - { - return "dxgkrnl"; -@@ -166,11 +434,16 @@ static void dxgdmafence_release(struct dma_fence *fence) - struct dxgsyncpoint *syncpoint; - - syncpoint = to_syncpoint(fence); -- if (syncpoint) { -- if (syncpoint->hdr.event_id) -- dxgglobal_get_host_event(syncpoint->hdr.event_id); -- kfree(syncpoint); -- } -+ if (syncpoint == NULL) -+ return; -+ -+ if (syncpoint->hdr.event_id) -+ dxgglobal_get_host_event(syncpoint->hdr.event_id); -+ -+ if (syncpoint->shared_syncobj) -+ 
dxgsharedsyncobj_put(syncpoint->shared_syncobj); -+ -+ kfree(syncpoint); - } - - static bool dxgdmafence_signaled(struct dma_fence *fence) -diff --git a/drivers/hv/dxgkrnl/dxgsyncfile.h b/drivers/hv/dxgkrnl/dxgsyncfile.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgsyncfile.h -+++ b/drivers/hv/dxgkrnl/dxgsyncfile.h -@@ -17,10 +17,13 @@ - #include - - int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs); -+int dxgkio_wait_sync_file(struct dxgprocess *process, void *__user inargs); -+int dxgkio_open_syncobj_from_syncfile(struct dxgprocess *p, void *__user args); - - struct dxgsyncpoint { - struct dxghostevent hdr; - struct dma_fence base; -+ struct dxgsharedsyncobject *shared_syncobj; - u64 fence_value; - u64 context; - spinlock_t lock; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -796,6 +796,55 @@ int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_open_sync_object(struct dxgprocess *process, -+ struct d3dkmthandle device, -+ struct d3dkmthandle host_shared_syncobj, -+ struct d3dkmthandle *syncobj) -+{ -+ struct dxgkvmb_command_opensyncobject *command; -+ struct dxgkvmb_command_opensyncobject_return result = { }; -+ int ret; -+ struct dxgvmbusmsg msg; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, NULL, process, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vm_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_OPENSYNCOBJECT, -+ process->host_handle); -+ command->device = device; -+ command->global_sync_object = host_shared_syncobj; -+ command->flags.shared = 1; -+ command->flags.nt_security_sharing = 1; -+ command->flags.no_signal = 1; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = 
dxgvmb_send_sync_msg(&dxgglobal->channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ -+ dxgglobal_release_channel_lock(); -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result.status); -+ if (ret < 0) -+ goto cleanup; -+ -+ *syncobj = result.sync_object; -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process, - struct d3dkmthandle object, - struct d3dkmthandle *shared_handle) -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -36,10 +36,8 @@ static char *errorstr(int ret) - } - #endif - --static int dxgsyncobj_release(struct inode *inode, struct file *file) -+void dxgsharedsyncobj_put(struct dxgsharedsyncobject *syncobj) - { -- struct dxgsharedsyncobject *syncobj = file->private_data; -- - DXG_TRACE("Release syncobj: %p", syncobj); - mutex_lock(&syncobj->fd_mutex); - kref_get(&syncobj->ssyncobj_kref); -@@ -56,6 +54,13 @@ static int dxgsyncobj_release(struct inode *inode, struct file *file) - } - mutex_unlock(&syncobj->fd_mutex); - kref_put(&syncobj->ssyncobj_kref, dxgsharedsyncobj_release); -+} -+ -+static int dxgsyncobj_release(struct inode *inode, struct file *file) -+{ -+ struct dxgsharedsyncobject *syncobj = file->private_data; -+ -+ dxgsharedsyncobj_put(syncobj); - return 0; - } - -@@ -4478,7 +4483,7 @@ dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) - return ret; - } - --static int -+int - dxgsharedsyncobj_get_host_nt_handle(struct dxgsharedsyncobject *syncobj, - struct dxgprocess *process, - struct d3dkmthandle objecthandle) -@@ -5226,6 +5231,9 @@ static struct ioctl_desc ioctls[] = { - /* 0x43 */ {dxgkio_query_statistics, LX_DXQUERYSTATISTICS}, - /* 0x44 */ {dxgkio_share_object_with_host, LX_DXSHAREOBJECTWITHHOST}, - /* 0x45 */ {dxgkio_create_sync_file, 
LX_DXCREATESYNCFILE}, -+/* 0x46 */ {dxgkio_wait_sync_file, LX_DXWAITSYNCFILE}, -+/* 0x46 */ {dxgkio_open_syncobj_from_syncfile, -+ LX_DXOPENSYNCOBJECTFROMSYNCFILE}, - }; - - /* -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -1561,6 +1561,25 @@ struct d3dkmt_createsyncfile { - __u64 sync_file_handle; /* out */ - }; - -+struct d3dkmt_waitsyncfile { -+ __u64 sync_file_handle; -+ struct d3dkmthandle context; -+ __u32 reserved; -+}; -+ -+struct d3dkmt_opensyncobjectfromsyncfile { -+ __u64 sync_file_handle; -+ struct d3dkmthandle device; -+ struct d3dkmthandle syncobj; /* out */ -+ __u64 fence_value; /* out */ -+#ifdef __KERNEL__ -+ void *fence_value_cpu_va; /* out */ -+#else -+ __u64 fence_value_cpu_va; /* out */ -+#endif -+ __u64 fence_value_gpu_va; /* out */ -+}; -+ - /* - * Dxgkrnl Graphics Port Driver ioctl definitions - * -@@ -1686,5 +1705,9 @@ struct d3dkmt_createsyncfile { - _IOWR(0x47, 0x44, struct d3dkmt_shareobjectwithhost) - #define LX_DXCREATESYNCFILE \ - _IOWR(0x47, 0x45, struct d3dkmt_createsyncfile) -+#define LX_DXWAITSYNCFILE \ -+ _IOWR(0x47, 0x46, struct d3dkmt_waitsyncfile) -+#define LX_DXOPENSYNCOBJECTFROMSYNCFILE \ -+ _IOWR(0x47, 0x47, struct d3dkmt_opensyncobjectfromsyncfile) - - #endif /* _D3DKMTHK_H */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1702-drivers-hv-dxgkrnl-Improve-tracing-and-return-values-from-copy-from-user.patch b/patch/kernel/archive/wsl2-arm64-6.1/1702-drivers-hv-dxgkrnl-Improve-tracing-and-return-values-from-copy-from-user.patch deleted file mode 100644 index 572b86f65edd..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1702-drivers-hv-dxgkrnl-Improve-tracing-and-return-values-from-copy-from-user.patch +++ /dev/null @@ -1,2000 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Fri, 6 May 2022 19:19:09 
-0700 -Subject: drivers: hv: dxgkrnl: Improve tracing and return values from copy - from user - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 17 +- - drivers/hv/dxgkrnl/dxgmodule.c | 1 + - drivers/hv/dxgkrnl/dxgsyncfile.c | 13 +- - drivers/hv/dxgkrnl/dxgvmbus.c | 98 +-- - drivers/hv/dxgkrnl/ioctl.c | 327 +++++----- - 5 files changed, 225 insertions(+), 231 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -999,18 +999,25 @@ void dxgk_validate_ioctls(void); - trace_printk(dev_fmt(fmt) "\n", ##__VA_ARGS__); \ - } while (0) - --#define DXG_ERR(fmt, ...) do { \ -- dev_err(DXGDEV, fmt, ##__VA_ARGS__); \ -- trace_printk("*** dxgkerror *** " dev_fmt(fmt) "\n", ##__VA_ARGS__); \ -+#define DXG_ERR(fmt, ...) do { \ -+ dev_err(DXGDEV, "%s: " fmt, __func__, ##__VA_ARGS__); \ -+ trace_printk("*** dxgkerror *** " dev_fmt(fmt) "\n", ##__VA_ARGS__); \ - } while (0) - - #else - - #define DXG_TRACE(...) --#define DXG_ERR(fmt, ...) do { \ -- dev_err(DXGDEV, fmt, ##__VA_ARGS__); \ -+#define DXG_ERR(fmt, ...) 
do { \ -+ dev_err(DXGDEV, "%s: " fmt, __func__, ##__VA_ARGS__); \ - } while (0) - - #endif /* DEBUG */ - -+#define DXG_TRACE_IOCTL_END(ret) do { \ -+ if (ret < 0) \ -+ DXG_ERR("Ioctl failed: %d", ret); \ -+ else \ -+ DXG_TRACE("Ioctl returned: %d", ret); \ -+} while (0) -+ - #endif -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -961,3 +961,4 @@ module_exit(dxg_drv_exit); - - MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Microsoft Dxgkrnl virtual compute device Driver"); -+MODULE_VERSION("2.0.0"); -diff --git a/drivers/hv/dxgkrnl/dxgsyncfile.c b/drivers/hv/dxgkrnl/dxgsyncfile.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgsyncfile.c -+++ b/drivers/hv/dxgkrnl/dxgsyncfile.c -@@ -38,13 +38,6 @@ - #undef dev_fmt - #define dev_fmt(fmt) "dxgk: " fmt - --#ifdef DEBUG --static char *errorstr(int ret) --{ -- return ret < 0 ? 
"err" : ""; --} --#endif -- - static const struct dma_fence_ops dxgdmafence_ops; - - static struct dxgsyncpoint *to_syncpoint(struct dma_fence *fence) -@@ -193,7 +186,7 @@ int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) - if (fd >= 0) - put_unused_fd(fd); - } -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -317,7 +310,7 @@ int dxgkio_open_syncobj_from_syncfile(struct dxgprocess *process, - kref_put(&device->device_kref, dxgdevice_release); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -415,7 +408,7 @@ int dxgkio_wait_sync_file(struct dxgprocess *process, void *__user inargs) - if (dmafence) - dma_fence_put(dmafence); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1212,7 +1212,7 @@ dxgvmb_send_create_context(struct dxgadapter *adapter, - args->priv_drv_data_size); - if (ret) { - DXG_ERR("Faled to copy private data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -1230,7 +1230,7 @@ dxgvmb_send_create_context(struct dxgadapter *adapter, - if (ret) { - DXG_ERR( - "Faled to copy private data to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - dxgvmb_send_destroy_context(adapter, process, - context); - context.v = 0; -@@ -1365,7 +1365,7 @@ copy_private_data(struct d3dkmt_createallocation *args, - args->private_runtime_data_size); - if (ret) { - DXG_ERR("failed to copy runtime data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - private_data_dest += args->private_runtime_data_size; -@@ -1385,7 +1385,7 @@ copy_private_data(struct d3dkmt_createallocation *args, - args->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy private data"); -- ret = 
-EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - private_data_dest += args->priv_drv_data_size; -@@ -1406,7 +1406,7 @@ copy_private_data(struct d3dkmt_createallocation *args, - input_alloc->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy alloc data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - private_data_dest += input_alloc->priv_drv_data_size; -@@ -1658,7 +1658,7 @@ create_local_allocations(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy resource handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -1690,7 +1690,7 @@ create_local_allocations(struct dxgprocess *process, - host_alloc->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy private data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - alloc_private_data += host_alloc->priv_drv_data_size; -@@ -1700,7 +1700,7 @@ create_local_allocations(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy alloc handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -1714,7 +1714,7 @@ create_local_allocations(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy global share"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -1961,7 +1961,7 @@ int dxgvmb_send_query_clock_calibration(struct dxgprocess *process, - sizeof(result.clock_data)); - if (ret) { - DXG_ERR("failed to copy clock data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = ntstatus2int(result.status); -@@ -2041,7 +2041,7 @@ int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - alloc_size); - if (ret) { - DXG_ERR("failed to copy alloc handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -2059,7 +2059,7 @@ int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - result_allocation_size); - if (ret) { - DXG_ERR("failed to copy residency status"); -- ret = -EINVAL; 
-+ ret = -EFAULT; - } - - cleanup: -@@ -2105,7 +2105,7 @@ int dxgvmb_send_escape(struct dxgprocess *process, - args->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy priv data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -2164,14 +2164,14 @@ int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - sizeof(output->budget)); - if (ret) { - DXG_ERR("failed to copy budget"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(&output->current_usage, &result.current_usage, - sizeof(output->current_usage)); - if (ret) { - DXG_ERR("failed to copy current usage"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(&output->current_reservation, -@@ -2179,7 +2179,7 @@ int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - sizeof(output->current_reservation)); - if (ret) { - DXG_ERR("failed to copy reservation"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(&output->available_for_reservation, -@@ -2187,7 +2187,7 @@ int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - sizeof(output->available_for_reservation)); - if (ret) { - DXG_ERR("failed to copy avail reservation"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -2229,7 +2229,7 @@ int dxgvmb_send_get_device_state(struct dxgprocess *process, - ret = copy_to_user(output, &result.args, sizeof(result.args)); - if (ret) { - DXG_ERR("failed to copy output args"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - if (args->state_type == _D3DKMT_DEVICESTATE_EXECUTION) -@@ -2404,7 +2404,7 @@ int dxgvmb_send_make_resident(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy alloc handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - command_vgpu_to_host_init2(&command->hdr, -@@ -2454,7 +2454,7 @@ int dxgvmb_send_evict(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy alloc 
handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - command_vgpu_to_host_init2(&command->hdr, -@@ -2502,14 +2502,14 @@ int dxgvmb_send_submit_command(struct dxgprocess *process, - hbufsize); - if (ret) { - DXG_ERR(" failed to copy history buffer"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_from_user((u8 *) &command[1] + hbufsize, - args->priv_drv_data, args->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy history priv data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2671,7 +2671,7 @@ int dxgvmb_send_update_gpu_va(struct dxgprocess *process, - op_size); - if (ret) { - DXG_ERR("failed to copy operations"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2751,7 +2751,7 @@ dxgvmb_send_create_sync_object(struct dxgprocess *process, - sizeof(u64)); - if (ret) { - DXG_ERR("failed to read fence"); -- ret = -EINVAL; -+ ret = -EFAULT; - } else { - DXG_TRACE("fence value:%lx", - value); -@@ -2820,7 +2820,7 @@ int dxgvmb_send_signal_sync_object(struct dxgprocess *process, - if (ret) { - DXG_ERR("Failed to read objects %p %d", - objects, object_size); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - current_pos += object_size; -@@ -2834,7 +2834,7 @@ int dxgvmb_send_signal_sync_object(struct dxgprocess *process, - if (ret) { - DXG_ERR("Failed to read contexts %p %d", - contexts, context_size); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - current_pos += context_size; -@@ -2844,7 +2844,7 @@ int dxgvmb_send_signal_sync_object(struct dxgprocess *process, - if (ret) { - DXG_ERR("Failed to read fences %p %d", - fences, fence_size); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -2898,7 +2898,7 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - ret = copy_from_user(current_pos, args->objects, object_size); - if (ret) { - DXG_ERR("failed to copy objects"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - current_pos += 
object_size; -@@ -2906,7 +2906,7 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - fence_size); - if (ret) { - DXG_ERR("failed to copy fences"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } else { -@@ -3037,7 +3037,7 @@ int dxgvmb_send_lock2(struct dxgprocess *process, - sizeof(args->data)); - if (ret) { - DXG_ERR("failed to copy data"); -- ret = -EINVAL; -+ ret = -EFAULT; - alloc->cpu_address_refcount--; - if (alloc->cpu_address_refcount == 0) { - dxg_unmap_iospace(alloc->cpu_address, -@@ -3119,7 +3119,7 @@ int dxgvmb_send_update_alloc_property(struct dxgprocess *process, - sizeof(u64)); - if (ret1) { - DXG_ERR("failed to copy paging fence"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - } - cleanup: -@@ -3204,14 +3204,14 @@ int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, - alloc_size); - if (ret) { - DXG_ERR("failed to copy alloc handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_from_user((u8 *) allocations + alloc_size, - args->priorities, priority_size); - if (ret) { - DXG_ERR("failed to copy alloc priority"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3277,7 +3277,7 @@ int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, - alloc_size); - if (ret) { - DXG_ERR("failed to copy alloc handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3296,7 +3296,7 @@ int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, - priority_size); - if (ret) { - DXG_ERR("failed to copy priorities"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -3402,7 +3402,7 @@ int dxgvmb_send_offer_allocations(struct dxgprocess *process, - } - if (ret) { - DXG_ERR("failed to copy input handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3457,7 +3457,7 @@ int dxgvmb_send_reclaim_allocations(struct dxgprocess *process, - } - if (ret) { - DXG_ERR("failed to copy input handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto 
cleanup; - } - -@@ -3469,7 +3469,7 @@ int dxgvmb_send_reclaim_allocations(struct dxgprocess *process, - &result->paging_fence_value, sizeof(u64)); - if (ret) { - DXG_ERR("failed to copy paging fence"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3480,7 +3480,7 @@ int dxgvmb_send_reclaim_allocations(struct dxgprocess *process, - args->allocation_count); - if (ret) { - DXG_ERR("failed to copy results"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - } - -@@ -3559,7 +3559,7 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - args->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy private data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -3604,7 +3604,7 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy hwqueue handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(&inargs->queue_progress_fence, -@@ -3612,7 +3612,7 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to progress fence"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(&inargs->queue_progress_fence_cpu_va, -@@ -3620,7 +3620,7 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - sizeof(inargs->queue_progress_fence_cpu_va)); - if (ret) { - DXG_ERR("failed to copy fence cpu va"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(&inargs->queue_progress_fence_gpu_va, -@@ -3628,7 +3628,7 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - sizeof(u64)); - if (ret) { - DXG_ERR("failed to copy fence gpu va"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - if (args->priv_drv_data_size) { -@@ -3637,7 +3637,7 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - args->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy private data"); -- ret = -EINVAL; -+ ret = 
-EFAULT; - } - } - -@@ -3706,7 +3706,7 @@ int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - args->private_data, args->private_data_size); - if (ret) { - DXG_ERR("Faled to copy private data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3758,7 +3758,7 @@ int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - args->private_data_size); - if (ret) { - DXG_ERR("Faled to copy private data to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -3791,7 +3791,7 @@ int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, - primaries_size); - if (ret) { - DXG_ERR("failed to copy primaries handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -3801,7 +3801,7 @@ int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, - args->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy primaries data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -29,13 +29,6 @@ struct ioctl_desc { - u32 ioctl; - }; - --#ifdef DEBUG --static char *errorstr(int ret) --{ -- return ret < 0 ? 
"err" : ""; --} --#endif -- - void dxgsharedsyncobj_put(struct dxgsharedsyncobject *syncobj) - { - DXG_TRACE("Release syncobj: %p", syncobj); -@@ -108,7 +101,7 @@ static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("Faled to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -129,7 +122,7 @@ static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, - &args.adapter_handle, - sizeof(struct d3dkmthandle)); - if (ret) -- ret = -EINVAL; -+ ret = -EFAULT; - } - adapter = entry; - } -@@ -150,7 +143,7 @@ static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, - if (ret < 0) - dxgprocess_close_adapter(process, args.adapter_handle); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -173,7 +166,7 @@ static int dxgkio_query_statistics(struct dxgprocess *process, - ret = copy_from_user(args, inargs, sizeof(*args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -199,7 +192,7 @@ static int dxgkio_query_statistics(struct dxgprocess *process, - ret = copy_to_user(inargs, args, sizeof(*args)); - if (ret) { - DXG_ERR("failed to copy args"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - } - dxgadapter_release_lock_shared(adapter); -@@ -209,7 +202,7 @@ static int dxgkio_query_statistics(struct dxgprocess *process, - if (args) - vfree(args); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -233,7 +226,7 @@ dxgkp_enum_adapters(struct dxgprocess *process, - &dxgglobal->num_adapters, sizeof(u32)); - if (ret) { - DXG_ERR("copy_to_user faled"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - goto cleanup; - } -@@ -291,7 +284,7 @@ dxgkp_enum_adapters(struct dxgprocess *process, - &dxgglobal->num_adapters, sizeof(u32)); - if (ret) { - DXG_ERR("copy_to_user failed"); -- ret = -EINVAL; 
-+ ret = -EFAULT; - } - goto cleanup; - } -@@ -300,13 +293,13 @@ dxgkp_enum_adapters(struct dxgprocess *process, - sizeof(adapter_count)); - if (ret) { - DXG_ERR("failed to copy adapter_count"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(info_out, info, sizeof(info[0]) * adapter_count); - if (ret) { - DXG_ERR("failed to copy adapter info"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -326,7 +319,7 @@ dxgkp_enum_adapters(struct dxgprocess *process, - if (adapters) - vfree(adapters); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -437,7 +430,7 @@ dxgkio_enum_adapters(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -447,7 +440,7 @@ dxgkio_enum_adapters(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy args to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - goto cleanup; - } -@@ -508,14 +501,14 @@ dxgkio_enum_adapters(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy args to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(args.adapters, info, - sizeof(info[0]) * args.num_adapters); - if (ret) { - DXG_ERR("failed to copy adapter info to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -536,7 +529,7 @@ dxgkio_enum_adapters(struct dxgprocess *process, void *__user inargs) - if (adapters) - vfree(adapters); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -549,7 +542,7 @@ dxgkio_enum_adapters3(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - 
DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -561,7 +554,7 @@ dxgkio_enum_adapters3(struct dxgprocess *process, void *__user inargs) - - cleanup: - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -574,7 +567,7 @@ dxgkio_close_adapter(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -584,7 +577,7 @@ dxgkio_close_adapter(struct dxgprocess *process, void *__user inargs) - - cleanup: - -- DXG_TRACE("ioctl: %s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -598,7 +591,7 @@ dxgkio_query_adapter_info(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -630,7 +623,7 @@ dxgkio_query_adapter_info(struct dxgprocess *process, void *__user inargs) - if (adapter) - kref_put(&adapter->adapter_kref, dxgadapter_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -647,7 +640,7 @@ dxgkio_create_device(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -677,7 +670,7 @@ dxgkio_create_device(struct dxgprocess *process, void *__user inargs) - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy device handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -709,7 +702,7 @@ dxgkio_create_device(struct dxgprocess *process, void *__user inargs) - if (adapter) - kref_put(&adapter->adapter_kref, dxgadapter_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ 
DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -724,7 +717,7 @@ dxgkio_destroy_device(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -756,7 +749,7 @@ dxgkio_destroy_device(struct dxgprocess *process, void *__user inargs) - - cleanup: - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -774,7 +767,7 @@ dxgkio_create_context_virtual(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -824,7 +817,7 @@ dxgkio_create_context_virtual(struct dxgprocess *process, void *__user inargs) - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy context handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - } else { - DXG_ERR("invalid host handle"); -@@ -851,7 +844,7 @@ dxgkio_create_context_virtual(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -868,7 +861,7 @@ dxgkio_destroy_context(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -920,7 +913,7 @@ dxgkio_destroy_context(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %s %d", errorstr(ret), __func__, ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -938,7 +931,7 @@ dxgkio_create_hwqueue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - 
DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1002,7 +995,7 @@ dxgkio_create_hwqueue(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -1019,7 +1012,7 @@ static int dxgkio_destroy_hwqueue(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1070,7 +1063,7 @@ static int dxgkio_destroy_hwqueue(struct dxgprocess *process, - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -1088,7 +1081,7 @@ dxgkio_create_paging_queue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1128,7 +1121,7 @@ dxgkio_create_paging_queue(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1169,7 +1162,7 @@ dxgkio_create_paging_queue(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -1186,7 +1179,7 @@ dxgkio_destroy_paging_queue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1247,7 +1240,7 @@ dxgkio_destroy_paging_queue(struct dxgprocess *process, void *__user 
inargs) - kref_put(&device->device_kref, dxgdevice_release); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -1351,7 +1344,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1373,7 +1366,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - alloc_info_size); - if (ret) { - DXG_ERR("failed to copy alloc info"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1412,7 +1405,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - sizeof(standard_alloc)); - if (ret) { - DXG_ERR("failed to copy std alloc data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - if (standard_alloc.type == -@@ -1556,7 +1549,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - if (ret) { - DXG_ERR( - "failed to copy runtime data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -1576,7 +1569,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - if (ret) { - DXG_ERR( - "failed to copy res data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -1733,7 +1726,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -1793,7 +1786,7 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1823,7 +1816,7 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - handle_size); - if (ret) { - 
DXG_ERR("failed to copy alloc handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -1962,7 +1955,7 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - if (allocs) - vfree(allocs); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -1978,7 +1971,7 @@ dxgkio_make_resident(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2022,7 +2015,7 @@ dxgkio_make_resident(struct dxgprocess *process, void *__user inargs) - &args.paging_fence_value, sizeof(u64)); - if (ret2) { - DXG_ERR("failed to copy paging fence"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2030,7 +2023,7 @@ dxgkio_make_resident(struct dxgprocess *process, void *__user inargs) - &args.num_bytes_to_trim, sizeof(u64)); - if (ret2) { - DXG_ERR("failed to copy bytes to trim"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2041,7 +2034,7 @@ dxgkio_make_resident(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - - return ret; - } -@@ -2058,7 +2051,7 @@ dxgkio_evict(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2090,7 +2083,7 @@ dxgkio_evict(struct dxgprocess *process, void *__user inargs) - &args.num_bytes_to_trim, sizeof(u64)); - if (ret) { - DXG_ERR("failed to copy bytes to trim to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - cleanup: - -@@ -2099,7 +2092,7 @@ dxgkio_evict(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- 
DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2114,7 +2107,7 @@ dxgkio_offer_allocations(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2153,7 +2146,7 @@ dxgkio_offer_allocations(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2169,7 +2162,7 @@ dxgkio_reclaim_allocations(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2212,7 +2205,7 @@ dxgkio_reclaim_allocations(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2227,7 +2220,7 @@ dxgkio_submit_command(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2280,7 +2273,7 @@ dxgkio_submit_command(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2296,7 +2289,7 @@ dxgkio_submit_command_to_hwqueue(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2336,7 +2329,7 @@ dxgkio_submit_command_to_hwqueue(struct dxgprocess *process, 
- if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2352,7 +2345,7 @@ dxgkio_submit_signal_to_hwqueue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2376,7 +2369,7 @@ dxgkio_submit_signal_to_hwqueue(struct dxgprocess *process, void *__user inargs) - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy hwqueue handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2410,7 +2403,7 @@ dxgkio_submit_signal_to_hwqueue(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2428,7 +2421,7 @@ dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2447,7 +2440,7 @@ dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(objects, args.objects, object_size); - if (ret) { - DXG_ERR("failed to copy objects"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2460,7 +2453,7 @@ dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(fences, args.fence_values, object_size); - if (ret) { - DXG_ERR("failed to copy fence values"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2494,7 +2487,7 @@ dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ 
DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2510,7 +2503,7 @@ dxgkio_map_gpu_va(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2542,7 +2535,7 @@ dxgkio_map_gpu_va(struct dxgprocess *process, void *__user inargs) - &args.paging_fence_value, sizeof(u64)); - if (ret2) { - DXG_ERR("failed to copy paging fence to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2550,7 +2543,7 @@ dxgkio_map_gpu_va(struct dxgprocess *process, void *__user inargs) - sizeof(args.virtual_address)); - if (ret2) { - DXG_ERR("failed to copy va to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2561,7 +2554,7 @@ dxgkio_map_gpu_va(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2577,7 +2570,7 @@ dxgkio_reserve_gpu_va(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2614,7 +2607,7 @@ dxgkio_reserve_gpu_va(struct dxgprocess *process, void *__user inargs) - sizeof(args.virtual_address)); - if (ret) { - DXG_ERR("failed to copy VA to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -2624,7 +2617,7 @@ dxgkio_reserve_gpu_va(struct dxgprocess *process, void *__user inargs) - kref_put(&adapter->adapter_kref, dxgadapter_release); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2638,7 +2631,7 @@ dxgkio_free_gpu_va(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = 
-EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2680,7 +2673,7 @@ dxgkio_update_gpu_va(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2705,7 +2698,7 @@ dxgkio_update_gpu_va(struct dxgprocess *process, void *__user inargs) - sizeof(args.fence_value)); - if (ret) { - DXG_ERR("failed to copy fence value to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -2734,7 +2727,7 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2808,7 +2801,7 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy output args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2842,7 +2835,7 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2856,7 +2849,7 @@ dxgkio_destroy_sync_object(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2885,7 +2878,7 @@ dxgkio_destroy_sync_object(struct dxgprocess *process, void *__user inargs) - - cleanup: - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2906,7 +2899,7 @@ dxgkio_open_sync_object_nt(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy 
input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2995,7 +2988,7 @@ dxgkio_open_sync_object_nt(struct dxgprocess *process, void *__user inargs) - if (ret == 0) - goto success; - DXG_ERR("failed to copy output args"); -- ret = -EINVAL; -+ ret = -EFAULT; - - cleanup: - -@@ -3020,7 +3013,7 @@ dxgkio_open_sync_object_nt(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3041,7 +3034,7 @@ dxgkio_signal_sync_object(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3129,7 +3122,7 @@ dxgkio_signal_sync_object(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3144,7 +3137,7 @@ dxgkio_signal_sync_object_cpu(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - if (args.object_count == 0 || -@@ -3181,7 +3174,7 @@ dxgkio_signal_sync_object_cpu(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3199,7 +3192,7 @@ dxgkio_signal_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3240,7 +3233,7 @@ dxgkio_signal_sync_object_gpu(struct dxgprocess *process, 
void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3262,7 +3255,7 @@ dxgkio_signal_sync_object_gpu2(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3287,7 +3280,7 @@ dxgkio_signal_sync_object_gpu2(struct dxgprocess *process, void *__user inargs) - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy context handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3365,7 +3358,7 @@ dxgkio_signal_sync_object_gpu2(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3380,7 +3373,7 @@ dxgkio_wait_sync_object(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3418,7 +3411,7 @@ dxgkio_wait_sync_object(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3439,7 +3432,7 @@ dxgkio_wait_sync_object_cpu(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3540,7 +3533,7 @@ dxgkio_wait_sync_object_cpu(struct dxgprocess *process, void *__user inargs) - kfree(async_host_event); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ 
-3563,7 +3556,7 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3583,7 +3576,7 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(objects, args.objects, object_size); - if (ret) { - DXG_ERR("failed to copy objects"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3637,7 +3630,7 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - object_size); - if (ret) { - DXG_ERR("failed to copy fences"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } else { -@@ -3673,7 +3666,7 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - if (fences && fences != &args.fence_value) - vfree(fences); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3690,7 +3683,7 @@ dxgkio_lock2(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3712,7 +3705,7 @@ dxgkio_lock2(struct dxgprocess *process, void *__user inargs) - alloc->cpu_address_refcount++; - } else { - DXG_ERR("Failed to copy cpu address"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - } - } -@@ -3749,7 +3742,7 @@ dxgkio_lock2(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - - success: -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3766,7 +3759,7 @@ dxgkio_unlock2(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ 
-3829,7 +3822,7 @@ dxgkio_unlock2(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - - success: -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3844,7 +3837,7 @@ dxgkio_update_alloc_property(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3872,7 +3865,7 @@ dxgkio_update_alloc_property(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3887,7 +3880,7 @@ dxgkio_mark_device_as_error(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - device = dxgprocess_device_by_handle(process, args.device); -@@ -3908,7 +3901,7 @@ dxgkio_mark_device_as_error(struct dxgprocess *process, void *__user inargs) - dxgadapter_release_lock_shared(adapter); - if (device) - kref_put(&device->device_kref, dxgdevice_release); -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3923,7 +3916,7 @@ dxgkio_query_alloc_residency(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3949,7 +3942,7 @@ dxgkio_query_alloc_residency(struct dxgprocess *process, void *__user inargs) - dxgadapter_release_lock_shared(adapter); - if (device) - kref_put(&device->device_kref, dxgdevice_release); -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3964,7 
+3957,7 @@ dxgkio_set_allocation_priority(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - device = dxgprocess_device_by_handle(process, args.device); -@@ -3984,7 +3977,7 @@ dxgkio_set_allocation_priority(struct dxgprocess *process, void *__user inargs) - dxgadapter_release_lock_shared(adapter); - if (device) - kref_put(&device->device_kref, dxgdevice_release); -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3999,7 +3992,7 @@ dxgkio_get_allocation_priority(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - device = dxgprocess_device_by_handle(process, args.device); -@@ -4019,7 +4012,7 @@ dxgkio_get_allocation_priority(struct dxgprocess *process, void *__user inargs) - dxgadapter_release_lock_shared(adapter); - if (device) - kref_put(&device->device_kref, dxgdevice_release); -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4069,14 +4062,14 @@ dxgkio_set_context_scheduling_priority(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - - ret = set_context_scheduling_priority(process, args.context, - args.priority, false); - cleanup: -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4111,7 +4104,7 @@ get_context_scheduling_priority(struct dxgprocess *process, - ret = copy_to_user(priority, &pri, sizeof(pri)); - if (ret) { - DXG_ERR("failed to copy priority to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -4134,14 +4127,14 @@ 
dxgkio_get_context_scheduling_priority(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - - ret = get_context_scheduling_priority(process, args.context, - &input->priority, false); - cleanup: -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4155,14 +4148,14 @@ dxgkio_set_context_process_scheduling_priority(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - - ret = set_context_scheduling_priority(process, args.context, - args.priority, true); - cleanup: -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4176,7 +4169,7 @@ dxgkio_get_context_process_scheduling_priority(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4184,7 +4177,7 @@ dxgkio_get_context_process_scheduling_priority(struct dxgprocess *process, - &((struct d3dkmt_getcontextinprocessschedulingpriority *) - inargs)->priority, true); - cleanup: -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4199,7 +4192,7 @@ dxgkio_change_vidmem_reservation(struct dxgprocess *process, void *__user inargs - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4232,7 +4225,7 @@ dxgkio_change_vidmem_reservation(struct dxgprocess *process, void *__user inargs - if (adapter) - kref_put(&adapter->adapter_kref, dxgadapter_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ 
-4247,7 +4240,7 @@ dxgkio_query_clock_calibration(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4272,7 +4265,7 @@ dxgkio_query_clock_calibration(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy output args"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -4295,7 +4288,7 @@ dxgkio_flush_heap_transitions(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4319,7 +4312,7 @@ dxgkio_flush_heap_transitions(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy output args"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -4341,7 +4334,7 @@ dxgkio_escape(struct dxgprocess *process, void *__user inargs) - - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4367,7 +4360,7 @@ dxgkio_escape(struct dxgprocess *process, void *__user inargs) - dxgadapter_release_lock_shared(adapter); - if (adapter) - kref_put(&adapter->adapter_kref, dxgadapter_release); -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4382,7 +4375,7 @@ dxgkio_query_vidmem_info(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4432,7 +4425,7 @@ dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - 
DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4458,7 +4451,7 @@ dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy args to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - goto cleanup; - } -@@ -4590,7 +4583,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4610,7 +4603,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(handles, args.objects, handle_size); - if (ret) { - DXG_ERR("failed to copy object handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4708,7 +4701,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(args.shared_handle, &tmp, sizeof(u64)); - if (ret) { - DXG_ERR("failed to copy shared handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -4726,7 +4719,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - if (resource) - kref_put(&resource->resource_kref, dxgresource_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4742,7 +4735,7 @@ dxgkio_query_resource_info_nt(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4795,7 +4788,7 @@ dxgkio_query_resource_info_nt(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy output args"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -4807,7 +4800,7 @@ dxgkio_query_resource_info_nt(struct 
dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4859,7 +4852,7 @@ assign_resource_handles(struct dxgprocess *process, - sizeof(open_alloc_info)); - if (ret) { - DXG_ERR("failed to copy alloc info"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -5009,7 +5002,7 @@ open_resource(struct dxgprocess *process, - shared_resource->runtime_private_data_size); - if (ret) { - DXG_ERR("failed to copy runtime data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -5020,7 +5013,7 @@ open_resource(struct dxgprocess *process, - shared_resource->resource_private_data_size); - if (ret) { - DXG_ERR("failed to copy resource data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -5031,7 +5024,7 @@ open_resource(struct dxgprocess *process, - shared_resource->alloc_private_data_size); - if (ret) { - DXG_ERR("failed to copy alloc data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -5046,7 +5039,7 @@ open_resource(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy resource handle to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -5054,7 +5047,7 @@ open_resource(struct dxgprocess *process, - &args->total_priv_drv_data_size, sizeof(u32)); - if (ret) { - DXG_ERR("failed to copy total driver data size"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -5102,7 +5095,7 @@ dxgkio_open_resource_nt(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -5112,7 +5105,7 @@ dxgkio_open_resource_nt(struct dxgprocess *process, void *__user inargs) - - cleanup: - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ 
DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -5125,7 +5118,7 @@ dxgkio_share_object_with_host(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -5138,12 +5131,12 @@ dxgkio_share_object_with_host(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy data to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1703-drivers-hv-dxgkrnl-Fix-synchronization-locks.patch b/patch/kernel/archive/wsl2-arm64-6.1/1703-drivers-hv-dxgkrnl-Fix-synchronization-locks.patch deleted file mode 100644 index d2f13ea1b3f6..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1703-drivers-hv-dxgkrnl-Fix-synchronization-locks.patch +++ /dev/null @@ -1,391 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Mon, 13 Jun 2022 14:18:10 -0700 -Subject: drivers: hv: dxgkrnl: Fix synchronization locks - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 19 ++- - drivers/hv/dxgkrnl/dxgkrnl.h | 8 +- - drivers/hv/dxgkrnl/dxgmodule.c | 3 +- - drivers/hv/dxgkrnl/dxgprocess.c | 11 +- - drivers/hv/dxgkrnl/dxgvmbus.c | 85 +++++++--- - drivers/hv/dxgkrnl/ioctl.c | 24 ++- - drivers/hv/dxgkrnl/misc.h | 1 + - 7 files changed, 101 insertions(+), 50 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -136,7 +136,7 @@ void dxgadapter_release(struct kref *refcount) - struct dxgadapter 
*adapter; - - adapter = container_of(refcount, struct dxgadapter, adapter_kref); -- DXG_TRACE("%p", adapter); -+ DXG_TRACE("Destroying adapter: %px", adapter); - kfree(adapter); - } - -@@ -270,6 +270,8 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - if (ret < 0) { - kref_put(&device->device_kref, dxgdevice_release); - device = NULL; -+ } else { -+ DXG_TRACE("dxgdevice created: %px", device); - } - } - return device; -@@ -413,11 +415,8 @@ void dxgdevice_destroy(struct dxgdevice *device) - - cleanup: - -- if (device->adapter) { -+ if (device->adapter) - dxgprocess_adapter_remove_device(device); -- kref_put(&device->adapter->adapter_kref, dxgadapter_release); -- device->adapter = NULL; -- } - - up_write(&device->device_lock); - -@@ -721,6 +720,8 @@ void dxgdevice_release(struct kref *refcount) - struct dxgdevice *device; - - device = container_of(refcount, struct dxgdevice, device_kref); -+ DXG_TRACE("Destroying device: %px", device); -+ kref_put(&device->adapter->adapter_kref, dxgadapter_release); - kfree(device); - } - -@@ -999,6 +1000,9 @@ void dxgpagingqueue_destroy(struct dxgpagingqueue *pqueue) - kfree(pqueue); - } - -+/* -+ * Process_adapter_mutex is held. 
-+ */ - struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - struct dxgadapter *adapter) - { -@@ -1108,7 +1112,7 @@ int dxgprocess_adapter_add_device(struct dxgprocess *process, - - void dxgprocess_adapter_remove_device(struct dxgdevice *device) - { -- DXG_TRACE("Removing device: %p", device); -+ DXG_TRACE("Removing device: %px", device); - mutex_lock(&device->adapter_info->device_list_mutex); - if (device->device_list_entry.next) { - list_del(&device->device_list_entry); -@@ -1147,8 +1151,7 @@ void dxgsharedsyncobj_release(struct kref *refcount) - if (syncobj->adapter) { - dxgadapter_remove_shared_syncobj(syncobj->adapter, - syncobj); -- kref_put(&syncobj->adapter->adapter_kref, -- dxgadapter_release); -+ kref_put(&syncobj->adapter->adapter_kref, dxgadapter_release); - } - kfree(syncobj); - } -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -404,7 +404,10 @@ struct dxgprocess { - /* Handle of the corresponding objec on the host */ - struct d3dkmthandle host_handle; - -- /* List of opened adapters (dxgprocess_adapter) */ -+ /* -+ * List of opened adapters (dxgprocess_adapter). -+ * Protected by process_adapter_mutex. -+ */ - struct list_head process_adapter_list_head; - }; - -@@ -451,6 +454,8 @@ enum dxgadapter_state { - struct dxgadapter { - struct rw_semaphore core_lock; - struct kref adapter_kref; -+ /* Protects creation and destruction of dxgdevice objects */ -+ struct mutex device_creation_lock; - /* Entry in the list of adapters in dxgglobal */ - struct list_head adapter_list_entry; - /* The list of dxgprocess_adapter entries */ -@@ -997,6 +1002,7 @@ void dxgk_validate_ioctls(void); - - #define DXG_TRACE(fmt, ...) do { \ - trace_printk(dev_fmt(fmt) "\n", ##__VA_ARGS__); \ -+ dev_dbg(DXGDEV, "%s: " fmt, __func__, ##__VA_ARGS__); \ - } while (0) - - #define DXG_ERR(fmt, ...) 
do { \ -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -272,6 +272,7 @@ int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, - adapter->host_vgpu_luid = host_vgpu_luid; - kref_init(&adapter->adapter_kref); - init_rwsem(&adapter->core_lock); -+ mutex_init(&adapter->device_creation_lock); - - INIT_LIST_HEAD(&adapter->adapter_process_list_head); - INIT_LIST_HEAD(&adapter->shared_resource_list_head); -@@ -961,4 +962,4 @@ module_exit(dxg_drv_exit); - - MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Microsoft Dxgkrnl virtual compute device Driver"); --MODULE_VERSION("2.0.0"); -+MODULE_VERSION("2.0.1"); -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -214,14 +214,15 @@ int dxgprocess_close_adapter(struct dxgprocess *process, - hmgrtable_unlock(&process->local_handle_table, DXGLOCK_EXCL); - - if (adapter) { -+ mutex_lock(&adapter->device_creation_lock); -+ dxgglobal_acquire_process_adapter_lock(); - adapter_info = dxgprocess_get_adapter_info(process, adapter); -- if (adapter_info) { -- dxgglobal_acquire_process_adapter_lock(); -+ if (adapter_info) - dxgprocess_adapter_release(adapter_info); -- dxgglobal_release_process_adapter_lock(); -- } else { -+ else - ret = -EINVAL; -- } -+ dxgglobal_release_process_adapter_lock(); -+ mutex_unlock(&adapter->device_creation_lock); - } else { - DXG_ERR("Adapter not found %x", handle.v); - ret = -EINVAL; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1573,8 +1573,27 @@ process_allocation_handles(struct dxgprocess *process, - struct dxgresource *resource) - { - int ret = 0; -- int i; 
-+ int i = 0; -+ int k; -+ struct dxgkvmb_command_allocinfo_return *host_alloc; - -+ /* -+ * Assign handle to the internal objects, so VM bus messages will be -+ * sent to the host to free them during object destruction. -+ */ -+ if (args->flags.create_resource) -+ resource->handle = res->resource; -+ for (i = 0; i < args->alloc_count; i++) { -+ host_alloc = &res->allocation_info[i]; -+ dxgalloc[i]->alloc_handle = host_alloc->allocation; -+ } -+ -+ /* -+ * Assign handle to the handle table. -+ * In case of a failure all handles should be freed. -+ * When the function returns, the objects could be destroyed by -+ * handle immediately. -+ */ - hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); - if (args->flags.create_resource) { - ret = hmgrtable_assign_handle(&process->handle_table, resource, -@@ -1583,14 +1602,12 @@ process_allocation_handles(struct dxgprocess *process, - if (ret < 0) { - DXG_ERR("failed to assign resource handle %x", - res->resource.v); -+ goto cleanup; - } else { -- resource->handle = res->resource; - resource->handle_valid = 1; - } - } - for (i = 0; i < args->alloc_count; i++) { -- struct dxgkvmb_command_allocinfo_return *host_alloc; -- - host_alloc = &res->allocation_info[i]; - ret = hmgrtable_assign_handle(&process->handle_table, - dxgalloc[i], -@@ -1602,9 +1619,26 @@ process_allocation_handles(struct dxgprocess *process, - args->alloc_count, i); - break; - } -- dxgalloc[i]->alloc_handle = host_alloc->allocation; - dxgalloc[i]->handle_valid = 1; - } -+ if (ret < 0) { -+ if (args->flags.create_resource) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ res->resource); -+ resource->handle_valid = 0; -+ } -+ for (k = 0; k < i; k++) { -+ host_alloc = &res->allocation_info[i]; -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ host_alloc->allocation); -+ dxgalloc[i]->handle_valid = 0; -+ } -+ } -+ -+cleanup: -+ - hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); - - if 
(ret) -@@ -1705,18 +1739,17 @@ create_local_allocations(struct dxgprocess *process, - } - } - -- ret = process_allocation_handles(process, device, args, result, -- dxgalloc, resource); -- if (ret < 0) -- goto cleanup; -- - ret = copy_to_user(&input_args->global_share, &args->global_share, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy global share"); - ret = -EFAULT; -+ goto cleanup; - } - -+ ret = process_allocation_handles(process, device, args, result, -+ dxgalloc, resource); -+ - cleanup: - - if (ret < 0) { -@@ -3576,22 +3609,6 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - goto cleanup; - } - -- ret = hmgrtable_assign_handle_safe(&process->handle_table, hwqueue, -- HMGRENTRY_TYPE_DXGHWQUEUE, -- command->hwqueue); -- if (ret < 0) -- goto cleanup; -- -- ret = hmgrtable_assign_handle_safe(&process->handle_table, -- NULL, -- HMGRENTRY_TYPE_MONITOREDFENCE, -- command->hwqueue_progress_fence); -- if (ret < 0) -- goto cleanup; -- -- hwqueue->handle = command->hwqueue; -- hwqueue->progress_fence_sync_object = command->hwqueue_progress_fence; -- - hwqueue->progress_fence_mapped_address = - dxg_map_iospace((u64)command->hwqueue_progress_fence_cpuva, - PAGE_SIZE, PROT_READ | PROT_WRITE, true); -@@ -3641,6 +3658,22 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - } - } - -+ ret = hmgrtable_assign_handle_safe(&process->handle_table, -+ NULL, -+ HMGRENTRY_TYPE_MONITOREDFENCE, -+ command->hwqueue_progress_fence); -+ if (ret < 0) -+ goto cleanup; -+ -+ hwqueue->progress_fence_sync_object = command->hwqueue_progress_fence; -+ hwqueue->handle = command->hwqueue; -+ -+ ret = hmgrtable_assign_handle_safe(&process->handle_table, hwqueue, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ command->hwqueue); -+ if (ret < 0) -+ hwqueue->handle.v = 0; -+ - cleanup: - if (ret < 0) { - DXG_ERR("failed %x", ret); -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- 
a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -636,6 +636,7 @@ dxgkio_create_device(struct dxgprocess *process, void *__user inargs) - struct dxgdevice *device = NULL; - struct d3dkmthandle host_device_handle = {}; - bool adapter_locked = false; -+ bool device_creation_locked = false; - - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { -@@ -651,6 +652,9 @@ dxgkio_create_device(struct dxgprocess *process, void *__user inargs) - goto cleanup; - } - -+ mutex_lock(&adapter->device_creation_lock); -+ device_creation_locked = true; -+ - device = dxgdevice_create(adapter, process); - if (device == NULL) { - ret = -ENOMEM; -@@ -699,6 +703,9 @@ dxgkio_create_device(struct dxgprocess *process, void *__user inargs) - if (adapter_locked) - dxgadapter_release_lock_shared(adapter); - -+ if (device_creation_locked) -+ mutex_unlock(&adapter->device_creation_lock); -+ - if (adapter) - kref_put(&adapter->adapter_kref, dxgadapter_release); - -@@ -803,22 +810,21 @@ dxgkio_create_context_virtual(struct dxgprocess *process, void *__user inargs) - host_context_handle = dxgvmb_send_create_context(adapter, - process, &args); - if (host_context_handle.v) { -- hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -- ret = hmgrtable_assign_handle(&process->handle_table, context, -- HMGRENTRY_TYPE_DXGCONTEXT, -- host_context_handle); -- if (ret >= 0) -- context->handle = host_context_handle; -- hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -- if (ret < 0) -- goto cleanup; - ret = copy_to_user(&((struct d3dkmt_createcontextvirtual *) - inargs)->context, &host_context_handle, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy context handle"); - ret = -EFAULT; -+ goto cleanup; - } -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, context, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ host_context_handle); -+ if (ret >= 0) -+ context->handle = host_context_handle; -+ 
hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); - } else { - DXG_ERR("invalid host handle"); - ret = -EINVAL; -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -38,6 +38,7 @@ extern const struct d3dkmthandle zerohandle; - * core_lock (dxgadapter lock) - * device_lock (dxgdevice lock) - * process_adapter_mutex -+ * device_creation_lock in dxgadapter - * adapter_list_lock - * device_mutex (dxgglobal mutex) - */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1704-drivers-hv-dxgkrnl-Close-shared-file-objects-in-case-of-a-failure.patch b/patch/kernel/archive/wsl2-arm64-6.1/1704-drivers-hv-dxgkrnl-Close-shared-file-objects-in-case-of-a-failure.patch deleted file mode 100644 index 3d0f8dbf3e00..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1704-drivers-hv-dxgkrnl-Close-shared-file-objects-in-case-of-a-failure.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 28 Jun 2022 17:26:11 -0700 -Subject: drivers: hv: dxgkrnl: Close shared file objects in case of a failure - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/ioctl.c | 14 +++++++--- - 1 file changed, 10 insertions(+), 4 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -4536,7 +4536,7 @@ enum dxg_sharedobject_type { - }; - - static int get_object_fd(enum dxg_sharedobject_type type, -- void *object, int *fdout) -+ void *object, int *fdout, struct file **filp) - { - struct file *file; - int fd; -@@ -4565,8 +4565,8 @@ static int get_object_fd(enum dxg_sharedobject_type type, - return -ENOTRECOVERABLE; - } - -- fd_install(fd, file); - *fdout = fd; -+ *filp = 
file; - return 0; - } - -@@ -4581,6 +4581,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - struct dxgsharedresource *shared_resource = NULL; - struct d3dkmthandle *handles = NULL; - int object_fd = -1; -+ struct file *filp = NULL; - void *obj = NULL; - u32 handle_size; - int ret; -@@ -4660,7 +4661,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - switch (object_type) { - case HMGRENTRY_TYPE_DXGSYNCOBJECT: - ret = get_object_fd(DXG_SHARED_SYNCOBJECT, shared_syncobj, -- &object_fd); -+ &object_fd, &filp); - if (ret < 0) { - DXG_ERR("get_object_fd failed for sync object"); - goto cleanup; -@@ -4675,7 +4676,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - break; - case HMGRENTRY_TYPE_DXGRESOURCE: - ret = get_object_fd(DXG_SHARED_RESOURCE, shared_resource, -- &object_fd); -+ &object_fd, &filp); - if (ret < 0) { - DXG_ERR("get_object_fd failed for resource"); - goto cleanup; -@@ -4708,10 +4709,15 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - if (ret) { - DXG_ERR("failed to copy shared handle"); - ret = -EFAULT; -+ goto cleanup; - } - -+ fd_install(object_fd, filp); -+ - cleanup: - if (ret < 0) { -+ if (filp) -+ fput(filp); - if (object_fd >= 0) - put_unused_fd(object_fd); - } --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1705-drivers-hv-dxgkrnl-Added-missed-NULL-check-for-resource-object.patch b/patch/kernel/archive/wsl2-arm64-6.1/1705-drivers-hv-dxgkrnl-Added-missed-NULL-check-for-resource-object.patch deleted file mode 100644 index 0e0e86806c35..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1705-drivers-hv-dxgkrnl-Added-missed-NULL-check-for-resource-object.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 29 Jun 2022 10:04:23 -0700 -Subject: drivers: hv: dxgkrnl: Added missed NULL check for resource object - -Signed-off-by: Iouri Tarassov 
-[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/ioctl.c | 10 ++++++---- - 1 file changed, 6 insertions(+), 4 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -1589,7 +1589,8 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - &process->handle_table, - HMGRENTRY_TYPE_DXGRESOURCE, - args.resource); -- kref_get(&resource->resource_kref); -+ if (resource != NULL) -+ kref_get(&resource->resource_kref); - dxgprocess_ht_lock_shared_up(process); - - if (resource == NULL || resource->device != device) { -@@ -1693,10 +1694,8 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - &standard_alloc); - cleanup: - -- if (resource_mutex_acquired) { -+ if (resource_mutex_acquired) - mutex_unlock(&resource->resource_mutex); -- kref_put(&resource->resource_kref, dxgresource_release); -- } - if (ret < 0) { - if (dxgalloc) { - for (i = 0; i < args.alloc_count; i++) { -@@ -1727,6 +1726,9 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - if (adapter) - dxgadapter_release_lock_shared(adapter); - -+ if (resource && !args.flags.create_resource) -+ kref_put(&resource->resource_kref, dxgresource_release); -+ - if (device) { - dxgdevice_release_lock_shared(device); - kref_put(&device->device_kref, dxgdevice_release); --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1706-drivers-hv-dxgkrnl-Fixed-dxgkrnl-to-build-for-the-6.1-kernel.patch b/patch/kernel/archive/wsl2-arm64-6.1/1706-drivers-hv-dxgkrnl-Fixed-dxgkrnl-to-build-for-the-6.1-kernel.patch deleted file mode 100644 index da923d76f19a..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1706-drivers-hv-dxgkrnl-Fixed-dxgkrnl-to-build-for-the-6.1-kernel.patch +++ /dev/null @@ -1,84 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 
-From: Iouri Tarassov -Date: Thu, 26 Jan 2023 10:49:41 -0800 -Subject: drivers: hv: dxgkrnl: Fixed dxgkrnl to build for the 6.1 kernel - -Definition for GPADL was changed from u32 to struct vmbus_gpadl. - -Signed-off-by: Iouri Tarassov ---- - drivers/hv/dxgkrnl/dxgadapter.c | 8 -------- - drivers/hv/dxgkrnl/dxgkrnl.h | 4 ---- - drivers/hv/dxgkrnl/dxgvmbus.c | 8 -------- - 3 files changed, 20 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -927,19 +927,11 @@ void dxgallocation_destroy(struct dxgallocation *alloc) - alloc->owner.device, - &args, &alloc->alloc_handle); - } --#ifdef _MAIN_KERNEL_ - if (alloc->gpadl.gpadl_handle) { - DXG_TRACE("Teardown gpadl %d", alloc->gpadl.gpadl_handle); - vmbus_teardown_gpadl(dxgglobal_get_vmbus(), &alloc->gpadl); - alloc->gpadl.gpadl_handle = 0; - } --#else -- if (alloc->gpadl) { -- DXG_TRACE("Teardown gpadl %d", alloc->gpadl); -- vmbus_teardown_gpadl(dxgglobal_get_vmbus(), alloc->gpadl); -- alloc->gpadl = 0; -- } --#endif - if (alloc->priv_drv_data) - vfree(alloc->priv_drv_data); - if (alloc->cpu_address_mapped) -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -728,11 +728,7 @@ struct dxgallocation { - u32 cached:1; - u32 handle_valid:1; - /* GPADL address list for existing sysmem allocations */ --#ifdef _MAIN_KERNEL_ - struct vmbus_gpadl gpadl; --#else -- u32 gpadl; --#endif - /* Number of pages in the 'pages' array */ - u32 num_pages; - /* -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1493,22 +1493,14 @@ int create_existing_sysmem(struct dxgdevice *device, - ret = -ENOMEM; - 
goto cleanup; - } --#ifdef _MAIN_KERNEL_ - DXG_TRACE("New gpadl %d", dxgalloc->gpadl.gpadl_handle); --#else -- DXG_TRACE("New gpadl %d", dxgalloc->gpadl); --#endif - - command_vgpu_to_host_init2(&set_store_command->hdr, - DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE, - device->process->host_handle); - set_store_command->device = device->handle; - set_store_command->allocation = host_alloc->allocation; --#ifdef _MAIN_KERNEL_ - set_store_command->gpadl = dxgalloc->gpadl.gpadl_handle; --#else -- set_store_command->gpadl = dxgalloc->gpadl; --#endif - ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, - msg.size); - if (ret < 0) --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1707-virtio-pmem-Support-PCI-BAR-relative-addresses.patch b/patch/kernel/archive/wsl2-arm64-6.1/1707-virtio-pmem-Support-PCI-BAR-relative-addresses.patch deleted file mode 100644 index 47adbb07dfd7..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1707-virtio-pmem-Support-PCI-BAR-relative-addresses.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Taylor Stark -Date: Thu, 15 Jul 2021 15:35:05 -0700 -Subject: virtio-pmem: Support PCI BAR-relative addresses - -Update virtio-pmem to allow for the pmem region to be specified in either -guest absolute terms or as a PCI BAR-relative address. This is required -to support virtio-pmem in Hyper-V, since Hyper-V only allows PCI devices -to operate on PCI memory ranges defined via BARs. - -Virtio-pmem will check for a shared memory window and use that if found, -else it will fallback to using the guest absolute addresses in -virtio_pmem_config. This was chosen over defining a new feature bit, -since it's similar to how virtio-fs is configured. 
- -Signed-off-by: Taylor Stark - -Link: https://lore.kernel.org/r/20210715223505.GA29329@linuxonhyperv3.guj3yctzbm1etfxqx2vob5hsef.xx.internal.cloudapp.net -Signed-off-by: Tyler Hicks ---- - drivers/nvdimm/virtio_pmem.c | 21 ++++++++-- - drivers/nvdimm/virtio_pmem.h | 3 ++ - 2 files changed, 20 insertions(+), 4 deletions(-) - -diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c -index 111111111111..222222222222 100644 ---- a/drivers/nvdimm/virtio_pmem.c -+++ b/drivers/nvdimm/virtio_pmem.c -@@ -37,6 +37,8 @@ static int virtio_pmem_probe(struct virtio_device *vdev) - struct virtio_pmem *vpmem; - struct resource res; - int err = 0; -+ bool have_shm_region; -+ struct virtio_shm_region pmem_region; - - if (!vdev->config->get) { - dev_err(&vdev->dev, "%s failure: config access disabled\n", -@@ -58,10 +60,21 @@ static int virtio_pmem_probe(struct virtio_device *vdev) - goto out_err; - } - -- virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, -- start, &vpmem->start); -- virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, -- size, &vpmem->size); -+ /* Retrieve the pmem device's address and size. It may have been supplied -+ * as a PCI BAR-relative shared memory region, or as a guest absolute address. 
-+ */ -+ have_shm_region = virtio_get_shm_region(vpmem->vdev, &pmem_region, -+ VIRTIO_PMEM_SHMCAP_ID_PMEM_REGION); -+ -+ if (have_shm_region) { -+ vpmem->start = pmem_region.addr; -+ vpmem->size = pmem_region.len; -+ } else { -+ virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, -+ start, &vpmem->start); -+ virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, -+ size, &vpmem->size); -+ } - - res.start = vpmem->start; - res.end = vpmem->start + vpmem->size - 1; -diff --git a/drivers/nvdimm/virtio_pmem.h b/drivers/nvdimm/virtio_pmem.h -index 111111111111..222222222222 100644 ---- a/drivers/nvdimm/virtio_pmem.h -+++ b/drivers/nvdimm/virtio_pmem.h -@@ -50,6 +50,9 @@ struct virtio_pmem { - __u64 size; - }; - -+/* For the id field in virtio_pci_shm_cap */ -+#define VIRTIO_PMEM_SHMCAP_ID_PMEM_REGION 0 -+ - void virtio_pmem_host_ack(struct virtqueue *vq); - int async_pmem_flush(struct nd_region *nd_region, struct bio *bio); - #endif --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1708-virtio-pmem-Set-DRIVER_OK-status-prior-to-creating-pmem-region.patch b/patch/kernel/archive/wsl2-arm64-6.1/1708-virtio-pmem-Set-DRIVER_OK-status-prior-to-creating-pmem-region.patch deleted file mode 100644 index 28e61b8953fc..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1708-virtio-pmem-Set-DRIVER_OK-status-prior-to-creating-pmem-region.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Taylor Stark -Date: Thu, 15 Jul 2021 15:36:38 -0700 -Subject: virtio-pmem: Set DRIVER_OK status prior to creating pmem region - -Update virtio-pmem to call virtio_device_ready prior to creating the pmem -region. Otherwise, the guest may try to access the pmem region prior to -the DRIVER_OK status being set. - -In the case of Hyper-V, the backing pmem file isn't mapped to the guest -until the DRIVER_OK status is set. Therefore, attempts to access the pmem -region can cause the guest to crash. 
Hyper-V could map the file earlier, -for example at VM creation, but we didn't want to pay the mapping cost if -the device is never used. Additionally, it felt weird to allow the guest -to access the region prior to the device fully coming online. - -Signed-off-by: Taylor Stark -Reviewed-by: Pankaj Gupta - -Link: https://lore.kernel.org/r/20210715223638.GA29649@linuxonhyperv3.guj3yctzbm1etfxqx2vob5hsef.xx.internal.cloudapp.net -Signed-off-by: Tyler Hicks ---- - drivers/nvdimm/virtio_pmem.c | 6 ++++++ - 1 file changed, 6 insertions(+) - -diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c -index 111111111111..222222222222 100644 ---- a/drivers/nvdimm/virtio_pmem.c -+++ b/drivers/nvdimm/virtio_pmem.c -@@ -91,6 +91,11 @@ static int virtio_pmem_probe(struct virtio_device *vdev) - - dev_set_drvdata(&vdev->dev, vpmem->nvdimm_bus); - -+ /* Online the device prior to creating a pmem region, to ensure that -+ * the region is never touched while the device is offline. -+ */ -+ virtio_device_ready(vdev); -+ - ndr_desc.res = &res; - ndr_desc.numa_node = nid; - ndr_desc.flush = async_pmem_flush; -@@ -111,6 +116,7 @@ static int virtio_pmem_probe(struct virtio_device *vdev) - } - return 0; - out_nd: -+ vdev->config->reset(vdev); - virtio_reset_device(vdev); - nvdimm_bus_unregister(vpmem->nvdimm_bus); - out_vq: --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1709-mm-page_reporting-Add-checks-for-page_reporting_order-param.patch b/patch/kernel/archive/wsl2-arm64-6.1/1709-mm-page_reporting-Add-checks-for-page_reporting_order-param.patch deleted file mode 100644 index 2eb110c34ad6..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1709-mm-page_reporting-Add-checks-for-page_reporting_order-param.patch +++ /dev/null @@ -1,104 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Shradha Gupta -Date: Thu, 29 Sep 2022 23:01:38 -0700 -Subject: mm/page_reporting: Add checks for page_reporting_order param - -Current 
code allows the page_reporting_order parameter to be changed -via sysfs to any integer value. The new value is used immediately -in page reporting code with no validation, which could cause incorrect -behavior. Fix this by adding validation of the new value. -Export this parameter for use in the driver that is calling the -page_reporting_register(). - -This is needed by drivers like hv_balloon to know the order of the -pages reported. Traditionally the values provided in the kernel boot -line or subsequently changed via sysfs take priority therefore, if -page_reporting_order parameter's value is set, it takes precedence -over the value passed while registering with the driver. - -Signed-off-by: Shradha Gupta -Reviewed-by: Michael Kelley -Acked-by: Andrew Morton -Link: https://lore.kernel.org/r/1664517699-1085-2-git-send-email-shradhagupta@linux.microsoft.com -Signed-off-by: Wei Liu -(cherry picked from commit aebb02ce8b36d20464404206b89069dc9239a7f0) -Link: https://microsoft.visualstudio.com/OS/_workitems/edit/42270731/ -Signed-off-by: Kelsey Steele ---- - mm/page_reporting.c | 50 +++++++++- - 1 file changed, 45 insertions(+), 5 deletions(-) - -diff --git a/mm/page_reporting.c b/mm/page_reporting.c -index 111111111111..222222222222 100644 ---- a/mm/page_reporting.c -+++ b/mm/page_reporting.c -@@ -11,10 +11,42 @@ - #include "page_reporting.h" - #include "internal.h" - --unsigned int page_reporting_order = MAX_ORDER; --module_param(page_reporting_order, uint, 0644); -+/* Initialize to an unsupported value */ -+unsigned int page_reporting_order = -1; -+ -+static int page_order_update_notify(const char *val, const struct kernel_param *kp) -+{ -+ /* -+ * If param is set beyond this limit, order is set to default -+ * pageblock_order value -+ */ -+ return param_set_uint_minmax(val, kp, 0, MAX_ORDER-1); -+} -+ -+static const struct kernel_param_ops page_reporting_param_ops = { -+ .set = &page_order_update_notify, -+ /* -+ * For the get op, use param_get_int instead of 
param_get_uint. -+ * This is to make sure that when unset the initialized value of -+ * -1 is shown correctly -+ */ -+ .get = ¶m_get_int, -+}; -+ -+module_param_cb(page_reporting_order, &page_reporting_param_ops, -+ &page_reporting_order, 0644); - MODULE_PARM_DESC(page_reporting_order, "Set page reporting order"); - -+/* -+ * This symbol is also a kernel parameter. Export the page_reporting_order -+ * symbol so that other drivers can access it to control order values without -+ * having to introduce another configurable parameter. Only one driver can -+ * register with the page_reporting driver for the service, so we have just -+ * one control parameter for the use case(which can be accessed in both -+ * drivers) -+ */ -+EXPORT_SYMBOL_GPL(page_reporting_order); -+ - #define PAGE_REPORTING_DELAY (2 * HZ) - static struct page_reporting_dev_info __rcu *pr_dev_info __read_mostly; - -@@ -330,10 +362,18 @@ int page_reporting_register(struct page_reporting_dev_info *prdev) - } - - /* -- * Update the page reporting order if it's specified by driver. -- * Otherwise, it falls back to @pageblock_order. -+ * If the page_reporting_order value is not set, we check if -+ * an order is provided from the driver that is performing the -+ * registration. If that is not provided either, we default to -+ * pageblock_order. - */ -- page_reporting_order = prdev->order ? 
: pageblock_order; -+ -+ if (page_reporting_order == -1) { -+ if (prdev->order > 0 && prdev->order <= MAX_ORDER) -+ page_reporting_order = prdev->order; -+ else -+ page_reporting_order = pageblock_order; -+ } - - /* initialize state and work structures */ - atomic_set(&prdev->state, PAGE_REPORTING_IDLE); --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.1/1710-hv_balloon-Add-support-for-configurable-order-free-page-reporting.patch b/patch/kernel/archive/wsl2-arm64-6.1/1710-hv_balloon-Add-support-for-configurable-order-free-page-reporting.patch deleted file mode 100644 index 4ddfb0b52f1a..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.1/1710-hv_balloon-Add-support-for-configurable-order-free-page-reporting.patch +++ /dev/null @@ -1,202 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Shradha Gupta -Date: Thu, 29 Sep 2022 23:01:39 -0700 -Subject: hv_balloon: Add support for configurable order free page reporting - -Newer versions of Hyper-V allow reporting unused guest pages in chunks -smaller than 2 Mbytes. Using smaller chunks allows reporting more -unused guest pages, but with increased overhead in the finding the -small chunks. To make this tradeoff configurable, use the existing -page_reporting_order module parameter to control the reporting order. -Drop and refine checks that restricted the minimun page reporting order -to 2Mbytes size pages. 
Add appropriate checks to make sure the -underlying Hyper-V versions support cold discard hints of any order -(and not just starting from 9) - -Signed-off-by: Shradha Gupta -Reviewed-by: Michael Kelley -Link: https://lore.kernel.org/r/1664517699-1085-3-git-send-email-shradhagupta@linux.microsoft.com -Signed-off-by: Wei Liu -(cherry picked from commit dc60f2db39c3f8da4490c1ed827022bbc925d81c) -Link: https://microsoft.visualstudio.com/OS/_workitems/edit/42270731/ -Signed-off-by: Kelsey Steele ---- - drivers/hv/hv_balloon.c | 94 +++++++--- - 1 file changed, 73 insertions(+), 21 deletions(-) - -diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/hv_balloon.c -+++ b/drivers/hv/hv_balloon.c -@@ -469,12 +469,16 @@ static bool do_hot_add; - * the specified number of seconds. - */ - static uint pressure_report_delay = 45; -+extern unsigned int page_reporting_order; -+#define HV_MAX_FAILURES 2 - - /* - * The last time we posted a pressure report to host. - */ - static unsigned long last_post_time; - -+static int hv_hypercall_multi_failure; -+ - module_param(hot_add, bool, (S_IRUGO | S_IWUSR)); - MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add"); - -@@ -579,6 +583,10 @@ static struct hv_dynmem_device dm_device; - - static void post_status(struct hv_dynmem_device *dm); - -+static void enable_page_reporting(void); -+ -+static void disable_page_reporting(void); -+ - #ifdef CONFIG_MEMORY_HOTPLUG - static inline bool has_pfn_is_backed(struct hv_hotadd_state *has, - unsigned long pfn) -@@ -1418,6 +1426,18 @@ static int dm_thread_func(void *dm_dev) - */ - reinit_completion(&dm_device.config_event); - post_status(dm); -+ /* -+ * disable free page reporting if multiple hypercall -+ * failure flag set. 
It is not done in the page_reporting -+ * callback context as that causes a deadlock between -+ * page_reporting_process() and page_reporting_unregister() -+ */ -+ if (hv_hypercall_multi_failure >= HV_MAX_FAILURES) { -+ pr_err("Multiple failures in cold memory discard hypercall, disabling page reporting\n"); -+ disable_page_reporting(); -+ /* Reset the flag after disabling reporting */ -+ hv_hypercall_multi_failure = 0; -+ } - } - - return 0; -@@ -1593,20 +1613,20 @@ static void balloon_onchannelcallback(void *context) - - } - --/* Hyper-V only supports reporting 2MB pages or higher */ --#define HV_MIN_PAGE_REPORTING_ORDER 9 --#define HV_MIN_PAGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << HV_MIN_PAGE_REPORTING_ORDER) -+#define HV_LARGE_REPORTING_ORDER 9 -+#define HV_LARGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << \ -+ HV_LARGE_REPORTING_ORDER) - static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info, - struct scatterlist *sgl, unsigned int nents) - { - unsigned long flags; - struct hv_memory_hint *hint; -- int i; -+ int i, order; - u64 status; - struct scatterlist *sg; - - WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES); -- WARN_ON_ONCE(sgl->length < HV_MIN_PAGE_REPORTING_LEN); -+ WARN_ON_ONCE(sgl->length < (HV_HYP_PAGE_SIZE << page_reporting_order)); - local_irq_save(flags); - hint = *(struct hv_memory_hint **)this_cpu_ptr(hyperv_pcpu_input_arg); - if (!hint) { -@@ -1621,21 +1641,53 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info, - - range = &hint->ranges[i]; - range->address_space = 0; -- /* page reporting only reports 2MB pages or higher */ -- range->page.largepage = 1; -- range->page.additional_pages = -- (sg->length / HV_MIN_PAGE_REPORTING_LEN) - 1; -- range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB; -- range->base_large_pfn = -- page_to_hvpfn(sg_page(sg)) >> HV_MIN_PAGE_REPORTING_ORDER; -+ order = get_order(sg->length); -+ /* -+ * Hyper-V expects the additional_pages field in the units -+ * of one of these 3 
sizes, 4Kbytes, 2Mbytes or 1Gbytes. -+ * This is dictated by the values of the fields page.largesize -+ * and page_size. -+ * This code however, only uses 4Kbytes and 2Mbytes units -+ * and not 1Gbytes unit. -+ */ -+ -+ /* page reporting for pages 2MB or higher */ -+ if (order >= HV_LARGE_REPORTING_ORDER ) { -+ range->page.largepage = 1; -+ range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB; -+ range->base_large_pfn = page_to_hvpfn( -+ sg_page(sg)) >> HV_LARGE_REPORTING_ORDER; -+ range->page.additional_pages = -+ (sg->length / HV_LARGE_REPORTING_LEN) - 1; -+ } else { -+ /* Page reporting for pages below 2MB */ -+ range->page.basepfn = page_to_hvpfn(sg_page(sg)); -+ range->page.largepage = false; -+ range->page.additional_pages = -+ (sg->length / HV_HYP_PAGE_SIZE) - 1; -+ } -+ - } - - status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0, - hint, NULL); - local_irq_restore(flags); -- if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) { -+ if (!hv_result_success(status)) { -+ - pr_err("Cold memory discard hypercall failed with status %llx\n", -- status); -+ status); -+ if (hv_hypercall_multi_failure > 0) -+ hv_hypercall_multi_failure++; -+ -+ if (hv_result(status) == HV_STATUS_INVALID_PARAMETER) { -+ pr_err("Underlying Hyper-V does not support order less than 9. Hypercall failed\n"); -+ pr_err("Defaulting to page_reporting_order %d\n", -+ pageblock_order); -+ page_reporting_order = pageblock_order; -+ hv_hypercall_multi_failure++; -+ return -EINVAL; -+ } -+ - return -EINVAL; - } - -@@ -1646,12 +1698,6 @@ static void enable_page_reporting(void) - { - int ret; - -- /* Essentially, validating 'PAGE_REPORTING_MIN_ORDER' is big enough. 
*/ -- if (pageblock_order < HV_MIN_PAGE_REPORTING_ORDER) { -- pr_debug("Cold memory discard is only supported on 2MB pages and above\n"); -- return; -- } -- - if (!hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT)) { - pr_debug("Cold memory discard hint not supported by Hyper-V\n"); - return; -@@ -1659,12 +1705,18 @@ static void enable_page_reporting(void) - - BUILD_BUG_ON(PAGE_REPORTING_CAPACITY > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES); - dm_device.pr_dev_info.report = hv_free_page_report; -+ /* -+ * We let the page_reporting_order parameter decide the order -+ * in the page_reporting code -+ */ -+ dm_device.pr_dev_info.order = 0; - ret = page_reporting_register(&dm_device.pr_dev_info); - if (ret < 0) { - dm_device.pr_dev_info.report = NULL; - pr_err("Failed to enable cold memory discard: %d\n", ret); - } else { -- pr_info("Cold memory discard hint enabled\n"); -+ pr_info("Cold memory discard hint enabled with order %d\n", -+ page_reporting_order); - } - } - --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1666-Hyper-V-ARM64-Always-use-the-Hyper-V-hypercall-interface.patch b/patch/kernel/archive/wsl2-arm64-6.6/1666-Hyper-V-ARM64-Always-use-the-Hyper-V-hypercall-interface.patch deleted file mode 100644 index 6e573327fdab..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1666-Hyper-V-ARM64-Always-use-the-Hyper-V-hypercall-interface.patch +++ /dev/null @@ -1,239 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Sunil Muthuswamy -Date: Mon, 3 May 2021 14:17:52 -0700 -Subject: Hyper-V: ARM64: Always use the Hyper-V hypercall interface - -This patch forces the use of the Hyper-V hypercall interface, -instead of the architectural SMCCC interface on ARM64 because -not all versions of Windows support the SMCCC interface. All -versions of Windows will support the Hyper-V hypercall interface, -so this change should be both forward and backward compatible. 
- -Signed-off-by: Sunil Muthuswamy - -[tyhicks: Forward ported to v5.15] -Signed-off-by: Tyler Hicks -[kms: Forward ported to v6.1] -Signed-off-by: Kelsey Steele ---- - arch/arm64/hyperv/Makefile | 2 +- - arch/arm64/hyperv/hv_core.c | 57 ++++----- - arch/arm64/hyperv/hv_hvc.S | 61 ++++++++++ - arch/arm64/include/asm/mshyperv.h | 4 + - 4 files changed, 91 insertions(+), 33 deletions(-) - -diff --git a/arch/arm64/hyperv/Makefile b/arch/arm64/hyperv/Makefile -index 111111111111..222222222222 100644 ---- a/arch/arm64/hyperv/Makefile -+++ b/arch/arm64/hyperv/Makefile -@@ -1,2 +1,2 @@ - # SPDX-License-Identifier: GPL-2.0 --obj-y := hv_core.o mshyperv.o -+obj-y := hv_core.o mshyperv.o hv_hvc.o -diff --git a/arch/arm64/hyperv/hv_core.c b/arch/arm64/hyperv/hv_core.c -index 111111111111..222222222222 100644 ---- a/arch/arm64/hyperv/hv_core.c -+++ b/arch/arm64/hyperv/hv_core.c -@@ -23,16 +23,13 @@ - */ - u64 hv_do_hypercall(u64 control, void *input, void *output) - { -- struct arm_smccc_res res; - u64 input_address; - u64 output_address; - - input_address = input ? virt_to_phys(input) : 0; - output_address = output ? virt_to_phys(output) : 0; - -- arm_smccc_1_1_hvc(HV_FUNC_ID, control, -- input_address, output_address, &res); -- return res.a0; -+ return hv_do_hvc(control, input_address, output_address); - } - EXPORT_SYMBOL_GPL(hv_do_hypercall); - -@@ -41,27 +38,33 @@ EXPORT_SYMBOL_GPL(hv_do_hypercall); - * with arguments in registers instead of physical memory. - * Avoids the overhead of virt_to_phys for simple hypercalls. 
- */ -- - u64 hv_do_fast_hypercall8(u16 code, u64 input) - { -- struct arm_smccc_res res; - u64 control; - - control = (u64)code | HV_HYPERCALL_FAST_BIT; -- -- arm_smccc_1_1_hvc(HV_FUNC_ID, control, input, &res); -- return res.a0; -+ return hv_do_hvc(control, input); - } - EXPORT_SYMBOL_GPL(hv_do_fast_hypercall8); - -+union hv_hypercall_status { -+ u64 as_uint64; -+ struct { -+ u16 status; -+ u16 reserved; -+ u16 reps_completed; /* Low 12 bits */ -+ u16 reserved2; -+ }; -+}; -+ - /* - * Set a single VP register to a 64-bit value. - */ - void hv_set_vpreg(u32 msr, u64 value) - { -- struct arm_smccc_res res; -+ union hv_hypercall_status status; - -- arm_smccc_1_1_hvc(HV_FUNC_ID, -+ status.as_uint64 = hv_do_hvc( - HVCALL_SET_VP_REGISTERS | HV_HYPERCALL_FAST_BIT | - HV_HYPERCALL_REP_COMP_1, - HV_PARTITION_ID_SELF, -@@ -69,15 +72,14 @@ void hv_set_vpreg(u32 msr, u64 value) - msr, - 0, - value, -- 0, -- &res); -+ 0); - - /* - * Something is fundamentally broken in the hypervisor if - * setting a VP register fails. There's really no way to - * continue as a guest VM, so panic. - */ -- BUG_ON(!hv_result_success(res.a0)); -+ BUG_ON(status.status != HV_STATUS_SUCCESS); - } - EXPORT_SYMBOL_GPL(hv_set_vpreg); - -@@ -90,31 +92,22 @@ EXPORT_SYMBOL_GPL(hv_set_vpreg); - - void hv_get_vpreg_128(u32 msr, struct hv_get_vp_registers_output *result) - { -- struct arm_smccc_1_2_regs args; -- struct arm_smccc_1_2_regs res; -- -- args.a0 = HV_FUNC_ID; -- args.a1 = HVCALL_GET_VP_REGISTERS | HV_HYPERCALL_FAST_BIT | -- HV_HYPERCALL_REP_COMP_1; -- args.a2 = HV_PARTITION_ID_SELF; -- args.a3 = HV_VP_INDEX_SELF; -- args.a4 = msr; -+ u64 status; - -- /* -- * Use the SMCCC 1.2 interface because the results are in registers -- * beyond X0-X3. 
-- */ -- arm_smccc_1_2_hvc(&args, &res); -+ status = hv_do_hvc_fast_get( -+ HVCALL_GET_VP_REGISTERS | HV_HYPERCALL_FAST_BIT | -+ HV_HYPERCALL_REP_COMP_1, -+ HV_PARTITION_ID_SELF, -+ HV_VP_INDEX_SELF, -+ msr, -+ result); - - /* - * Something is fundamentally broken in the hypervisor if - * getting a VP register fails. There's really no way to - * continue as a guest VM, so panic. - */ -- BUG_ON(!hv_result_success(res.a0)); -- -- result->as64.low = res.a6; -- result->as64.high = res.a7; -+ BUG_ON((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS); - } - EXPORT_SYMBOL_GPL(hv_get_vpreg_128); - -diff --git a/arch/arm64/hyperv/hv_hvc.S b/arch/arm64/hyperv/hv_hvc.S -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/arch/arm64/hyperv/hv_hvc.S -@@ -0,0 +1,61 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+/* -+ * Microsoft Hyper-V hypervisor invocation routines -+ * -+ * Copyright (C) 2018, Microsoft, Inc. -+ * -+ * Author : Michael Kelley -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, but -+ * WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or -+ * NON INFRINGEMENT. See the GNU General Public License for more -+ * details. -+ */ -+ -+#include -+#include -+ -+ .text -+/* -+ * Do the HVC instruction. For Hyper-V the argument is always 1. -+ * x0 contains the hypercall control value, while additional registers -+ * vary depending on the hypercall, and whether the hypercall arguments -+ * are in memory or in registers (a "fast" hypercall per the Hyper-V -+ * TLFS). When the arguments are in memory x1 is the guest physical -+ * address of the input arguments, and x2 is the guest physical -+ * address of the output arguments. 
When the arguments are in -+ * registers, the register values depends on the hypercall. Note -+ * that this version cannot return any values in registers. -+ */ -+SYM_FUNC_START(hv_do_hvc) -+ hvc #1 -+ ret -+SYM_FUNC_END(hv_do_hvc) -+ -+/* -+ * This variant of HVC invocation is for hv_get_vpreg and -+ * hv_get_vpreg_128. The input parameters are passed in registers -+ * along with a pointer in x4 to where the output result should -+ * be stored. The output is returned in x15 and x16. x19 is used as -+ * scratch space to avoid buildng a stack frame, as Hyper-V does -+ * not preserve registers x0-x17. -+ */ -+SYM_FUNC_START(hv_do_hvc_fast_get) -+ /* -+ * Stash away x19 register so that it can be used as a scratch -+ * register and pop it at the end. -+ */ -+ str x19, [sp, #-16]! -+ mov x19, x4 -+ hvc #1 -+ str x15,[x19] -+ str x16,[x19,#8] -+ ldr x19, [sp], #16 -+ ret -+SYM_FUNC_END(hv_do_hvc_fast_get) -diff --git a/arch/arm64/include/asm/mshyperv.h b/arch/arm64/include/asm/mshyperv.h -index 111111111111..222222222222 100644 ---- a/arch/arm64/include/asm/mshyperv.h -+++ b/arch/arm64/include/asm/mshyperv.h -@@ -22,6 +22,10 @@ - #include - #include - -+extern u64 hv_do_hvc(u64 control, ...); -+extern u64 hv_do_hvc_fast_get(u64 control, u64 input1, u64 input2, u64 input3, -+ struct hv_get_vp_registers_output *output); -+ - /* - * Declare calls to get and set Hyper-V VP register values on ARM64, which - * requires a hypercall. 
--- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1667-drivers-hv-dxgkrnl-Add-virtual-compute-device-VMBus-channel-guids.patch b/patch/kernel/archive/wsl2-arm64-6.6/1667-drivers-hv-dxgkrnl-Add-virtual-compute-device-VMBus-channel-guids.patch deleted file mode 100644 index 786542656fbf..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1667-drivers-hv-dxgkrnl-Add-virtual-compute-device-VMBus-channel-guids.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 15 Feb 2022 18:11:52 -0800 -Subject: drivers: hv: dxgkrnl: Add virtual compute device VMBus channel guids - -Add VMBus channel guids, which are used by hyper-v virtual compute -device driver. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - include/linux/hyperv.h | 16 ++++++++++ - 1 file changed, 16 insertions(+) - -diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h -index 111111111111..222222222222 100644 ---- a/include/linux/hyperv.h -+++ b/include/linux/hyperv.h -@@ -1472,6 +1472,22 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); - .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ - 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) - -+/* -+ * GPU paravirtualization global DXGK channel -+ * {DDE9CBC0-5060-4436-9448-EA1254A5D177} -+ */ -+#define HV_GPUP_DXGK_GLOBAL_GUID \ -+ .guid = GUID_INIT(0xdde9cbc0, 0x5060, 0x4436, 0x94, 0x48, \ -+ 0xea, 0x12, 0x54, 0xa5, 0xd1, 0x77) -+ -+/* -+ * GPU paravirtualization per virtual GPU DXGK channel -+ * {6E382D18-3336-4F4B-ACC4-2B7703D4DF4A} -+ */ -+#define HV_GPUP_DXGK_VGPU_GUID \ -+ .guid = GUID_INIT(0x6e382d18, 0x3336, 0x4f4b, 0xac, 0xc4, \ -+ 0x2b, 0x77, 0x3, 0xd4, 0xdf, 0x4a) -+ - /* - * Synthetic FC GUID - * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1668-drivers-hv-dxgkrnl-Driver-initialization-and-loading.patch 
b/patch/kernel/archive/wsl2-arm64-6.6/1668-drivers-hv-dxgkrnl-Driver-initialization-and-loading.patch deleted file mode 100644 index d8623689a412..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1668-drivers-hv-dxgkrnl-Driver-initialization-and-loading.patch +++ /dev/null @@ -1,966 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 24 Mar 2021 11:10:28 -0700 -Subject: drivers: hv: dxgkrnl: Driver initialization and loading - -- Create skeleton and add basic functionality for the Hyper-V -compute device driver (dxgkrnl). - -- Register for PCI and VMBus driver notifications and handle -initialization of VMBus channels. - -- Connect the dxgkrnl module to the drivers/hv/ Makefile and Kconfig - -- Create a MAINTAINERS entry - -A VMBus channel is a communication interface between the Hyper-V guest -and the host. The are two type of VMBus channels, used in the driver: - - the global channel - - per virtual compute device channel - -A PCI device is created for each virtual compute device, projected -by the host. The device vendor is PCI_VENDOR_ID_MICROSOFT and device -id is PCI_DEVICE_ID_VIRTUAL_RENDER. dxg_pci_probe_device handles -arrival of such devices. The PCI config space of the virtual compute -device has luid of the corresponding virtual compute device VM -bus channel. This is how the compute device adapter objects are -linked to VMBus channels. - -VMBus interface version is exchanged by reading/writing the PCI config -space of the virtual compute device. - -The IO space is used to handle CPU accessible compute device -allocations. Hyper-V allocates IO space for the global VMBus channel. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - MAINTAINERS | 7 + - drivers/hv/Kconfig | 2 + - drivers/hv/Makefile | 1 + - drivers/hv/dxgkrnl/Kconfig | 26 + - drivers/hv/dxgkrnl/Makefile | 5 + - drivers/hv/dxgkrnl/dxgkrnl.h | 155 +++ - drivers/hv/dxgkrnl/dxgmodule.c | 506 ++++++++++ - drivers/hv/dxgkrnl/dxgvmbus.c | 92 ++ - drivers/hv/dxgkrnl/dxgvmbus.h | 19 + - include/uapi/misc/d3dkmthk.h | 27 + - 10 files changed, 840 insertions(+) - -diff --git a/MAINTAINERS b/MAINTAINERS -index 111111111111..222222222222 100644 ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -9771,6 +9771,13 @@ F: Documentation/devicetree/bindings/mtd/ti,am654-hbmc.yaml - F: drivers/mtd/hyperbus/ - F: include/linux/mtd/hyperbus.h - -+Hyper-V vGPU DRIVER -+M: Iouri Tarassov -+L: linux-hyperv@vger.kernel.org -+S: Supported -+F: drivers/hv/dxgkrnl/ -+F: include/uapi/misc/d3dkmthk.h -+ - HYPERVISOR VIRTUAL CONSOLE DRIVER - L: linuxppc-dev@lists.ozlabs.org - S: Odd Fixes -diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/hv/Kconfig -+++ b/drivers/hv/Kconfig -@@ -55,4 +55,6 @@ config HYPERV_BALLOON - help - Select this option to enable Hyper-V Balloon driver. 
- -+source "drivers/hv/dxgkrnl/Kconfig" -+ - endmenu -diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/hv/Makefile -+++ b/drivers/hv/Makefile -@@ -2,6 +2,7 @@ - obj-$(CONFIG_HYPERV) += hv_vmbus.o - obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o - obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o -+obj-$(CONFIG_DXGKRNL) += dxgkrnl/ - - CFLAGS_hv_trace.o = -I$(src) - CFLAGS_hv_balloon.o = -I$(src) -diff --git a/drivers/hv/dxgkrnl/Kconfig b/drivers/hv/dxgkrnl/Kconfig -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/Kconfig -@@ -0,0 +1,26 @@ -+# SPDX-License-Identifier: GPL-2.0 -+# Configuration for the hyper-v virtual compute driver (dxgkrnl) -+# -+ -+config DXGKRNL -+ tristate "Microsoft Paravirtualized GPU support" -+ depends on HYPERV -+ depends on 64BIT || COMPILE_TEST -+ help -+ This driver supports paravirtualized virtual compute devices, exposed -+ by Microsoft Hyper-V when Linux is running inside of a virtual machine -+ hosted by Windows. The virtual machines needs to be configured to use -+ host compute adapters. The driver name is dxgkrnl. -+ -+ An example of such virtual machine is a Windows Subsystem for -+ Linux container. When such container is instantiated, the Windows host -+ assigns compatible host GPU adapters to the container. The corresponding -+ virtual GPU devices appear on the PCI bus in the container. These -+ devices are enumerated and accessed by this driver. -+ -+ Communications with the driver are done by using the Microsoft libdxcore -+ library, which translates the D3DKMT interface -+ -+ to the driver IOCTLs. The virtual GPU devices are paravirtualized, -+ which means that access to the hardware is done in the host. The driver -+ communicates with the host using Hyper-V VM bus communication channels. 
-diff --git a/drivers/hv/dxgkrnl/Makefile b/drivers/hv/dxgkrnl/Makefile -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/Makefile -@@ -0,0 +1,5 @@ -+# SPDX-License-Identifier: GPL-2.0 -+# Makefile for the hyper-v compute device driver (dxgkrnl). -+ -+obj-$(CONFIG_DXGKRNL) += dxgkrnl.o -+dxgkrnl-y := dxgmodule.o dxgvmbus.o -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -0,0 +1,155 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Headers for internal objects -+ * -+ */ -+ -+#ifndef _DXGKRNL_H -+#define _DXGKRNL_H -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct dxgadapter; -+ -+/* -+ * Driver private data. -+ * A single /dev/dxg device is created per virtual machine. -+ */ -+struct dxgdriver{ -+ struct dxgglobal *dxgglobal; -+ struct device *dxgdev; -+ struct pci_driver pci_drv; -+ struct hv_driver vmbus_drv; -+}; -+extern struct dxgdriver dxgdrv; -+ -+#define DXGDEV dxgdrv.dxgdev -+ -+struct dxgvmbuschannel { -+ struct vmbus_channel *channel; -+ struct hv_device *hdev; -+ spinlock_t packet_list_mutex; -+ struct list_head packet_list_head; -+ struct kmem_cache *packet_cache; -+ atomic64_t packet_request_id; -+}; -+ -+int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev); -+void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch); -+void dxgvmbuschannel_receive(void *ctx); -+ -+/* -+ * The structure defines an offered vGPU vm bus channel. 
-+ */ -+struct dxgvgpuchannel { -+ struct list_head vgpu_ch_list_entry; -+ struct winluid adapter_luid; -+ struct hv_device *hdev; -+}; -+ -+struct dxgglobal { -+ struct dxgdriver *drvdata; -+ struct dxgvmbuschannel channel; -+ struct hv_device *hdev; -+ u32 num_adapters; -+ u32 vmbus_ver; /* Interface version */ -+ struct resource *mem; -+ u64 mmiospace_base; -+ u64 mmiospace_size; -+ struct miscdevice dxgdevice; -+ struct mutex device_mutex; -+ -+ /* -+ * List of the vGPU VM bus channels (dxgvgpuchannel) -+ * Protected by device_mutex -+ */ -+ struct list_head vgpu_ch_list_head; -+ -+ /* protects acces to the global VM bus channel */ -+ struct rw_semaphore channel_lock; -+ -+ bool global_channel_initialized; -+ bool async_msg_enabled; -+ bool misc_registered; -+ bool pci_registered; -+ bool vmbus_registered; -+}; -+ -+static inline struct dxgglobal *dxggbl(void) -+{ -+ return dxgdrv.dxgglobal; -+} -+ -+struct dxgprocess { -+ /* Placeholder */ -+}; -+ -+/* -+ * The convention is that VNBus instance id is a GUID, but the host sets -+ * the lower part of the value to the host adapter LUID. The function -+ * provides the necessary conversion. -+ */ -+static inline void guid_to_luid(guid_t *guid, struct winluid *luid) -+{ -+ *luid = *(struct winluid *)&guid->b[0]; -+} -+ -+/* -+ * VM bus interface -+ * -+ */ -+ -+/* -+ * The interface version is used to ensure that the host and the guest use the -+ * same VM bus protocol. It needs to be incremented every time the VM bus -+ * interface changes. DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION is -+ * incremented each time the earlier versions of the interface are no longer -+ * compatible with the current version. -+ */ -+#define DXGK_VMBUS_INTERFACE_VERSION_OLD 27 -+#define DXGK_VMBUS_INTERFACE_VERSION 40 -+#define DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION 16 -+ -+#ifdef DEBUG -+ -+void dxgk_validate_ioctls(void); -+ -+#define DXG_TRACE(fmt, ...) 
do { \ -+ trace_printk(dev_fmt(fmt) "\n", ##__VA_ARGS__); \ -+} while (0) -+ -+#define DXG_ERR(fmt, ...) do { \ -+ dev_err(DXGDEV, fmt, ##__VA_ARGS__); \ -+ trace_printk("*** dxgkerror *** " dev_fmt(fmt) "\n", ##__VA_ARGS__); \ -+} while (0) -+ -+#else -+ -+#define DXG_TRACE(...) -+#define DXG_ERR(fmt, ...) do { \ -+ dev_err(DXGDEV, fmt, ##__VA_ARGS__); \ -+} while (0) -+ -+#endif /* DEBUG */ -+ -+#endif -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -0,0 +1,506 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Interface with Linux kernel, PCI driver and the VM bus driver -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include "dxgkrnl.h" -+ -+#define PCI_VENDOR_ID_MICROSOFT 0x1414 -+#define PCI_DEVICE_ID_VIRTUAL_RENDER 0x008E -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -+ -+/* -+ * Interface from dxgglobal -+ */ -+ -+struct vmbus_channel *dxgglobal_get_vmbus(void) -+{ -+ return dxggbl()->channel.channel; -+} -+ -+struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void) -+{ -+ return &dxggbl()->channel; -+} -+ -+int dxgglobal_acquire_channel_lock(void) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ down_read(&dxgglobal->channel_lock); -+ if (dxgglobal->channel.channel == NULL) { -+ DXG_ERR("Failed to acquire global channel lock"); -+ return -ENODEV; -+ } else { -+ return 0; -+ } -+} -+ -+void dxgglobal_release_channel_lock(void) -+{ -+ up_read(&dxggbl()->channel_lock); -+} -+ -+const struct file_operations dxgk_fops = { -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Interface with the PCI driver -+ */ -+ -+/* -+ * Part of the PCI config space of the compute device is used for -+ * configuration data. Reading/writing of the PCI config space is forwarded -+ * to the host. 
-+ * -+ * Below are offsets in the PCI config spaces for various configuration values. -+ */ -+ -+/* Compute device VM bus channel instance ID */ -+#define DXGK_VMBUS_CHANNEL_ID_OFFSET 192 -+ -+/* DXGK_VMBUS_INTERFACE_VERSION (u32) */ -+#define DXGK_VMBUS_VERSION_OFFSET (DXGK_VMBUS_CHANNEL_ID_OFFSET + \ -+ sizeof(guid_t)) -+ -+/* Luid of the virtual GPU on the host (struct winluid) */ -+#define DXGK_VMBUS_VGPU_LUID_OFFSET (DXGK_VMBUS_VERSION_OFFSET + \ -+ sizeof(u32)) -+ -+/* The guest writes its capabilities to this address */ -+#define DXGK_VMBUS_GUESTCAPS_OFFSET (DXGK_VMBUS_VERSION_OFFSET + \ -+ sizeof(u32)) -+ -+/* Capabilities of the guest driver, reported to the host */ -+struct dxgk_vmbus_guestcaps { -+ union { -+ struct { -+ u32 wsl2 : 1; -+ u32 reserved : 31; -+ }; -+ u32 guest_caps; -+ }; -+}; -+ -+/* -+ * A helper function to read PCI config space. -+ */ -+static int dxg_pci_read_dwords(struct pci_dev *dev, int offset, int size, -+ void *val) -+{ -+ int off = offset; -+ int ret; -+ int i; -+ -+ /* Make sure the offset and size are 32 bit aligned */ -+ if (offset & 3 || size & 3) -+ return -EINVAL; -+ -+ for (i = 0; i < size / sizeof(int); i++) { -+ ret = pci_read_config_dword(dev, off, &((int *)val)[i]); -+ if (ret) { -+ DXG_ERR("Failed to read PCI config: %d", off); -+ return ret; -+ } -+ off += sizeof(int); -+ } -+ return 0; -+} -+ -+static int dxg_pci_probe_device(struct pci_dev *dev, -+ const struct pci_device_id *id) -+{ -+ int ret; -+ guid_t guid; -+ u32 vmbus_interface_ver = DXGK_VMBUS_INTERFACE_VERSION; -+ struct winluid vgpu_luid = {}; -+ struct dxgk_vmbus_guestcaps guest_caps = {.wsl2 = 1}; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ mutex_lock(&dxgglobal->device_mutex); -+ -+ if (dxgglobal->vmbus_ver == 0) { -+ /* Report capabilities to the host */ -+ -+ ret = pci_write_config_dword(dev, DXGK_VMBUS_GUESTCAPS_OFFSET, -+ guest_caps.guest_caps); -+ if (ret) -+ goto cleanup; -+ -+ /* Negotiate the VM bus version */ -+ -+ ret = 
pci_read_config_dword(dev, DXGK_VMBUS_VERSION_OFFSET, -+ &vmbus_interface_ver); -+ if (ret == 0 && vmbus_interface_ver != 0) -+ dxgglobal->vmbus_ver = vmbus_interface_ver; -+ else -+ dxgglobal->vmbus_ver = DXGK_VMBUS_INTERFACE_VERSION_OLD; -+ -+ if (dxgglobal->vmbus_ver < DXGK_VMBUS_INTERFACE_VERSION) -+ goto read_channel_id; -+ -+ ret = pci_write_config_dword(dev, DXGK_VMBUS_VERSION_OFFSET, -+ DXGK_VMBUS_INTERFACE_VERSION); -+ if (ret) -+ goto cleanup; -+ -+ if (dxgglobal->vmbus_ver > DXGK_VMBUS_INTERFACE_VERSION) -+ dxgglobal->vmbus_ver = DXGK_VMBUS_INTERFACE_VERSION; -+ } -+ -+read_channel_id: -+ -+ /* Get the VM bus channel ID for the virtual GPU */ -+ ret = dxg_pci_read_dwords(dev, DXGK_VMBUS_CHANNEL_ID_OFFSET, -+ sizeof(guid), (int *)&guid); -+ if (ret) -+ goto cleanup; -+ -+ if (dxgglobal->vmbus_ver >= DXGK_VMBUS_INTERFACE_VERSION) { -+ ret = dxg_pci_read_dwords(dev, DXGK_VMBUS_VGPU_LUID_OFFSET, -+ sizeof(vgpu_luid), &vgpu_luid); -+ if (ret) -+ goto cleanup; -+ } -+ -+ DXG_TRACE("Adapter channel: %pUb", &guid); -+ DXG_TRACE("Vmbus interface version: %d", dxgglobal->vmbus_ver); -+ DXG_TRACE("Host luid: %x-%x", vgpu_luid.b, vgpu_luid.a); -+ -+cleanup: -+ -+ mutex_unlock(&dxgglobal->device_mutex); -+ -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+static void dxg_pci_remove_device(struct pci_dev *dev) -+{ -+ /* Placeholder */ -+} -+ -+static struct pci_device_id dxg_pci_id_table[] = { -+ { -+ .vendor = PCI_VENDOR_ID_MICROSOFT, -+ .device = PCI_DEVICE_ID_VIRTUAL_RENDER, -+ .subvendor = PCI_ANY_ID, -+ .subdevice = PCI_ANY_ID -+ }, -+ { 0 } -+}; -+ -+/* -+ * Interface with the VM bus driver -+ */ -+ -+static int dxgglobal_getiospace(struct dxgglobal *dxgglobal) -+{ -+ /* Get mmio space for the global channel */ -+ struct hv_device *hdev = dxgglobal->hdev; -+ struct vmbus_channel *channel = hdev->channel; -+ resource_size_t pot_start = 0; -+ resource_size_t pot_end = -1; -+ int ret; -+ -+ dxgglobal->mmiospace_size = 
channel->offermsg.offer.mmio_megabytes; -+ if (dxgglobal->mmiospace_size == 0) { -+ DXG_TRACE("Zero mmio space is offered"); -+ return -ENOMEM; -+ } -+ dxgglobal->mmiospace_size <<= 20; -+ DXG_TRACE("mmio offered: %llx", dxgglobal->mmiospace_size); -+ -+ ret = vmbus_allocate_mmio(&dxgglobal->mem, hdev, pot_start, pot_end, -+ dxgglobal->mmiospace_size, 0x10000, false); -+ if (ret) { -+ DXG_ERR("Unable to allocate mmio memory: %d", ret); -+ return ret; -+ } -+ dxgglobal->mmiospace_size = dxgglobal->mem->end - -+ dxgglobal->mem->start + 1; -+ dxgglobal->mmiospace_base = dxgglobal->mem->start; -+ DXG_TRACE("mmio allocated %llx %llx %llx %llx", -+ dxgglobal->mmiospace_base, dxgglobal->mmiospace_size, -+ dxgglobal->mem->start, dxgglobal->mem->end); -+ -+ return 0; -+} -+ -+int dxgglobal_init_global_channel(void) -+{ -+ int ret = 0; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = dxgvmbuschannel_init(&dxgglobal->channel, dxgglobal->hdev); -+ if (ret) { -+ DXG_ERR("dxgvmbuschannel_init failed: %d", ret); -+ goto error; -+ } -+ -+ ret = dxgglobal_getiospace(dxgglobal); -+ if (ret) { -+ DXG_ERR("getiospace failed: %d", ret); -+ goto error; -+ } -+ -+ hv_set_drvdata(dxgglobal->hdev, dxgglobal); -+ -+error: -+ return ret; -+} -+ -+void dxgglobal_destroy_global_channel(void) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ down_write(&dxgglobal->channel_lock); -+ -+ dxgglobal->global_channel_initialized = false; -+ -+ if (dxgglobal->mem) { -+ vmbus_free_mmio(dxgglobal->mmiospace_base, -+ dxgglobal->mmiospace_size); -+ dxgglobal->mem = NULL; -+ } -+ -+ dxgvmbuschannel_destroy(&dxgglobal->channel); -+ -+ if (dxgglobal->hdev) { -+ hv_set_drvdata(dxgglobal->hdev, NULL); -+ dxgglobal->hdev = NULL; -+ } -+ -+ up_write(&dxgglobal->channel_lock); -+} -+ -+static const struct hv_vmbus_device_id dxg_vmbus_id_table[] = { -+ /* Per GPU Device GUID */ -+ { HV_GPUP_DXGK_VGPU_GUID }, -+ /* Global Dxgkgnl channel for the virtual machine */ -+ { HV_GPUP_DXGK_GLOBAL_GUID }, -+ { } 
-+}; -+ -+static int dxg_probe_vmbus(struct hv_device *hdev, -+ const struct hv_vmbus_device_id *dev_id) -+{ -+ int ret = 0; -+ struct winluid luid; -+ struct dxgvgpuchannel *vgpuch; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ mutex_lock(&dxgglobal->device_mutex); -+ -+ if (uuid_le_cmp(hdev->dev_type, dxg_vmbus_id_table[0].guid) == 0) { -+ /* This is a new virtual GPU channel */ -+ guid_to_luid(&hdev->channel->offermsg.offer.if_instance, &luid); -+ DXG_TRACE("vGPU channel: %pUb", -+ &hdev->channel->offermsg.offer.if_instance); -+ vgpuch = kzalloc(sizeof(struct dxgvgpuchannel), GFP_KERNEL); -+ if (vgpuch == NULL) { -+ ret = -ENOMEM; -+ goto error; -+ } -+ vgpuch->adapter_luid = luid; -+ vgpuch->hdev = hdev; -+ list_add_tail(&vgpuch->vgpu_ch_list_entry, -+ &dxgglobal->vgpu_ch_list_head); -+ } else if (uuid_le_cmp(hdev->dev_type, -+ dxg_vmbus_id_table[1].guid) == 0) { -+ /* This is the global Dxgkgnl channel */ -+ DXG_TRACE("Global channel: %pUb", -+ &hdev->channel->offermsg.offer.if_instance); -+ if (dxgglobal->hdev) { -+ /* This device should appear only once */ -+ DXG_ERR("global channel already exists"); -+ ret = -EBADE; -+ goto error; -+ } -+ dxgglobal->hdev = hdev; -+ } else { -+ /* Unknown device type */ -+ DXG_ERR("Unknown VM bus device type"); -+ ret = -ENODEV; -+ } -+ -+error: -+ -+ mutex_unlock(&dxgglobal->device_mutex); -+ -+ return ret; -+} -+ -+static int dxg_remove_vmbus(struct hv_device *hdev) -+{ -+ int ret = 0; -+ struct dxgvgpuchannel *vgpu_channel; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ mutex_lock(&dxgglobal->device_mutex); -+ -+ if (uuid_le_cmp(hdev->dev_type, dxg_vmbus_id_table[0].guid) == 0) { -+ DXG_TRACE("Remove virtual GPU channel"); -+ list_for_each_entry(vgpu_channel, -+ &dxgglobal->vgpu_ch_list_head, -+ vgpu_ch_list_entry) { -+ if (vgpu_channel->hdev == hdev) { -+ list_del(&vgpu_channel->vgpu_ch_list_entry); -+ kfree(vgpu_channel); -+ break; -+ } -+ } -+ } else if (uuid_le_cmp(hdev->dev_type, -+ dxg_vmbus_id_table[1].guid) 
== 0) { -+ DXG_TRACE("Remove global channel device"); -+ dxgglobal_destroy_global_channel(); -+ } else { -+ /* Unknown device type */ -+ DXG_ERR("Unknown device type"); -+ ret = -ENODEV; -+ } -+ -+ mutex_unlock(&dxgglobal->device_mutex); -+ -+ return ret; -+} -+ -+MODULE_DEVICE_TABLE(vmbus, dxg_vmbus_id_table); -+MODULE_DEVICE_TABLE(pci, dxg_pci_id_table); -+ -+/* -+ * Global driver data -+ */ -+ -+struct dxgdriver dxgdrv = { -+ .vmbus_drv.name = KBUILD_MODNAME, -+ .vmbus_drv.id_table = dxg_vmbus_id_table, -+ .vmbus_drv.probe = dxg_probe_vmbus, -+ .vmbus_drv.remove = dxg_remove_vmbus, -+ .vmbus_drv.driver = { -+ .probe_type = PROBE_PREFER_ASYNCHRONOUS, -+ }, -+ .pci_drv.name = KBUILD_MODNAME, -+ .pci_drv.id_table = dxg_pci_id_table, -+ .pci_drv.probe = dxg_pci_probe_device, -+ .pci_drv.remove = dxg_pci_remove_device -+}; -+ -+static struct dxgglobal *dxgglobal_create(void) -+{ -+ struct dxgglobal *dxgglobal; -+ -+ dxgglobal = kzalloc(sizeof(struct dxgglobal), GFP_KERNEL); -+ if (!dxgglobal) -+ return NULL; -+ -+ mutex_init(&dxgglobal->device_mutex); -+ -+ INIT_LIST_HEAD(&dxgglobal->vgpu_ch_list_head); -+ -+ init_rwsem(&dxgglobal->channel_lock); -+ -+ return dxgglobal; -+} -+ -+static void dxgglobal_destroy(struct dxgglobal *dxgglobal) -+{ -+ if (dxgglobal) { -+ mutex_lock(&dxgglobal->device_mutex); -+ dxgglobal_destroy_global_channel(); -+ mutex_unlock(&dxgglobal->device_mutex); -+ -+ if (dxgglobal->vmbus_registered) -+ vmbus_driver_unregister(&dxgdrv.vmbus_drv); -+ -+ dxgglobal_destroy_global_channel(); -+ -+ if (dxgglobal->pci_registered) -+ pci_unregister_driver(&dxgdrv.pci_drv); -+ -+ if (dxgglobal->misc_registered) -+ misc_deregister(&dxgglobal->dxgdevice); -+ -+ dxgglobal->drvdata->dxgdev = NULL; -+ -+ kfree(dxgglobal); -+ dxgglobal = NULL; -+ } -+} -+ -+static int __init dxg_drv_init(void) -+{ -+ int ret; -+ struct dxgglobal *dxgglobal = NULL; -+ -+ dxgglobal = dxgglobal_create(); -+ if (dxgglobal == NULL) { -+ pr_err("dxgglobal_init failed"); -+ ret = 
-ENOMEM; -+ goto error; -+ } -+ dxgglobal->drvdata = &dxgdrv; -+ -+ dxgglobal->dxgdevice.minor = MISC_DYNAMIC_MINOR; -+ dxgglobal->dxgdevice.name = "dxg"; -+ dxgglobal->dxgdevice.fops = &dxgk_fops; -+ dxgglobal->dxgdevice.mode = 0666; -+ ret = misc_register(&dxgglobal->dxgdevice); -+ if (ret) { -+ pr_err("misc_register failed: %d", ret); -+ goto error; -+ } -+ dxgglobal->misc_registered = true; -+ dxgdrv.dxgdev = dxgglobal->dxgdevice.this_device; -+ dxgdrv.dxgglobal = dxgglobal; -+ -+ ret = vmbus_driver_register(&dxgdrv.vmbus_drv); -+ if (ret) { -+ DXG_ERR("vmbus_driver_register failed: %d", ret); -+ goto error; -+ } -+ dxgglobal->vmbus_registered = true; -+ -+ ret = pci_register_driver(&dxgdrv.pci_drv); -+ if (ret) { -+ DXG_ERR("pci_driver_register failed: %d", ret); -+ goto error; -+ } -+ dxgglobal->pci_registered = true; -+ -+ return 0; -+ -+error: -+ /* This function does the cleanup */ -+ dxgglobal_destroy(dxgglobal); -+ dxgdrv.dxgglobal = NULL; -+ -+ return ret; -+} -+ -+static void __exit dxg_drv_exit(void) -+{ -+ dxgglobal_destroy(dxgdrv.dxgglobal); -+} -+ -+module_init(dxg_drv_init); -+module_exit(dxg_drv_exit); -+ -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("Microsoft Dxgkrnl virtual compute device Driver"); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -0,0 +1,92 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * VM bus interface implementation -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "dxgkrnl.h" -+#include "dxgvmbus.h" -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -+ -+#define RING_BUFSIZE (256 * 1024) -+ -+/* -+ * The structure is used to track VM bus packets, waiting for completion. 
-+ */ -+struct dxgvmbuspacket { -+ struct list_head packet_list_entry; -+ u64 request_id; -+ struct completion wait; -+ void *buffer; -+ u32 buffer_length; -+ int status; -+ bool completed; -+}; -+ -+int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev) -+{ -+ int ret; -+ -+ ch->hdev = hdev; -+ spin_lock_init(&ch->packet_list_mutex); -+ INIT_LIST_HEAD(&ch->packet_list_head); -+ atomic64_set(&ch->packet_request_id, 0); -+ -+ ch->packet_cache = kmem_cache_create("DXGK packet cache", -+ sizeof(struct dxgvmbuspacket), 0, -+ 0, NULL); -+ if (ch->packet_cache == NULL) { -+ DXG_ERR("packet_cache alloc failed"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0) -+ hdev->channel->max_pkt_size = DXG_MAX_VM_BUS_PACKET_SIZE; -+#endif -+ ret = vmbus_open(hdev->channel, RING_BUFSIZE, RING_BUFSIZE, -+ NULL, 0, dxgvmbuschannel_receive, ch); -+ if (ret) { -+ DXG_ERR("vmbus_open failed: %d", ret); -+ goto cleanup; -+ } -+ -+ ch->channel = hdev->channel; -+ -+cleanup: -+ -+ return ret; -+} -+ -+void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch) -+{ -+ kmem_cache_destroy(ch->packet_cache); -+ ch->packet_cache = NULL; -+ -+ if (ch->channel) { -+ vmbus_close(ch->channel); -+ ch->channel = NULL; -+ } -+} -+ -+/* Receive callback for messages from the host */ -+void dxgvmbuschannel_receive(void *ctx) -+{ -+} -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -0,0 +1,19 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. 
-+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * VM bus interface with the host definitions -+ * -+ */ -+ -+#ifndef _DXGVMBUS_H -+#define _DXGVMBUS_H -+ -+#define DXG_MAX_VM_BUS_PACKET_SIZE (1024 * 128) -+ -+#endif /* _DXGVMBUS_H */ -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/include/uapi/misc/d3dkmthk.h -@@ -0,0 +1,27 @@ -+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -+ -+/* -+ * Copyright (c) 2019, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * User mode WDDM interface definitions -+ * -+ */ -+ -+#ifndef _D3DKMTHK_H -+#define _D3DKMTHK_H -+ -+/* -+ * Matches the Windows LUID definition. -+ * LUID is a locally unique identifier (similar to GUID, but not global), -+ * which is guaranteed to be unique intil the computer is rebooted. -+ */ -+struct winluid { -+ __u32 a; -+ __u32 b; -+}; -+ -+#endif /* _D3DKMTHK_H */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1669-drivers-hv-dxgkrnl-Add-VMBus-message-support-initialize-VMBus-channels.patch b/patch/kernel/archive/wsl2-arm64-6.6/1669-drivers-hv-dxgkrnl-Add-VMBus-message-support-initialize-VMBus-channels.patch deleted file mode 100644 index 88a353371e27..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1669-drivers-hv-dxgkrnl-Add-VMBus-message-support-initialize-VMBus-channels.patch +++ /dev/null @@ -1,660 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 15 Feb 2022 18:53:07 -0800 -Subject: drivers: hv: dxgkrnl: Add VMBus message support, initialize VMBus - channels. - -Implement support for sending/receiving VMBus messages between -the host and the guest. - -Initialize the VMBus channels and notify the host about IO space -settings of the VMBus global channel. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 14 + - drivers/hv/dxgkrnl/dxgmodule.c | 9 +- - drivers/hv/dxgkrnl/dxgvmbus.c | 318 ++++++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 67 ++ - drivers/hv/dxgkrnl/ioctl.c | 24 + - drivers/hv/dxgkrnl/misc.h | 72 +++ - include/uapi/misc/d3dkmthk.h | 34 + - 7 files changed, 536 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -28,6 +28,8 @@ - #include - #include - #include -+#include "misc.h" -+#include - - struct dxgadapter; - -@@ -100,6 +102,13 @@ static inline struct dxgglobal *dxggbl(void) - return dxgdrv.dxgglobal; - } - -+int dxgglobal_init_global_channel(void); -+void dxgglobal_destroy_global_channel(void); -+struct vmbus_channel *dxgglobal_get_vmbus(void); -+struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void); -+int dxgglobal_acquire_channel_lock(void); -+void dxgglobal_release_channel_lock(void); -+ - struct dxgprocess { - /* Placeholder */ - }; -@@ -130,6 +139,11 @@ static inline void guid_to_luid(guid_t *guid, struct winluid *luid) - #define DXGK_VMBUS_INTERFACE_VERSION 40 - #define DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION 16 - -+void dxgvmb_initialize(void); -+int dxgvmb_send_set_iospace_region(u64 start, u64 len); -+ -+int ntstatus2int(struct ntstatus status); -+ - #ifdef DEBUG - - void dxgk_validate_ioctls(void); -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -260,6 +260,13 @@ int dxgglobal_init_global_channel(void) - goto error; - } - -+ ret = dxgvmb_send_set_iospace_region(dxgglobal->mmiospace_base, -+ dxgglobal->mmiospace_size); -+ if (ret < 0) { -+ DXG_ERR("send_set_iospace_region failed"); -+ goto 
error; -+ } -+ - hv_set_drvdata(dxgglobal->hdev, dxgglobal); - - error: -@@ -429,8 +436,6 @@ static void dxgglobal_destroy(struct dxgglobal *dxgglobal) - if (dxgglobal->vmbus_registered) - vmbus_driver_unregister(&dxgdrv.vmbus_drv); - -- dxgglobal_destroy_global_channel(); -- - if (dxgglobal->pci_registered) - pci_unregister_driver(&dxgdrv.pci_drv); - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -40,6 +40,121 @@ struct dxgvmbuspacket { - bool completed; - }; - -+struct dxgvmb_ext_header { -+ /* Offset from the start of the message to DXGKVMB_COMMAND_BASE */ -+ u32 command_offset; -+ u32 reserved; -+ struct winluid vgpu_luid; -+}; -+ -+#define VMBUSMESSAGEONSTACK 64 -+ -+struct dxgvmbusmsg { -+/* Points to the allocated buffer */ -+ struct dxgvmb_ext_header *hdr; -+/* Points to dxgkvmb_command_vm_to_host or dxgkvmb_command_vgpu_to_host */ -+ void *msg; -+/* The vm bus channel, used to pass the message to the host */ -+ struct dxgvmbuschannel *channel; -+/* Message size in bytes including the header and the payload */ -+ u32 size; -+/* Buffer used for small messages */ -+ char msg_on_stack[VMBUSMESSAGEONSTACK]; -+}; -+ -+struct dxgvmbusmsgres { -+/* Points to the allocated buffer */ -+ struct dxgvmb_ext_header *hdr; -+/* Points to dxgkvmb_command_vm_to_host or dxgkvmb_command_vgpu_to_host */ -+ void *msg; -+/* The vm bus channel, used to pass the message to the host */ -+ struct dxgvmbuschannel *channel; -+/* Message size in bytes including the header, the payload and the result */ -+ u32 size; -+/* Result buffer size in bytes */ -+ u32 res_size; -+/* Points to the result within the allocated buffer */ -+ void *res; -+}; -+ -+static int init_message(struct dxgvmbusmsg *msg, -+ struct dxgprocess *process, u32 size) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ bool use_ext_header = dxgglobal->vmbus_ver >= -+ 
DXGK_VMBUS_INTERFACE_VERSION; -+ -+ if (use_ext_header) -+ size += sizeof(struct dxgvmb_ext_header); -+ msg->size = size; -+ if (size <= VMBUSMESSAGEONSTACK) { -+ msg->hdr = (void *)msg->msg_on_stack; -+ memset(msg->hdr, 0, size); -+ } else { -+ msg->hdr = vzalloc(size); -+ if (msg->hdr == NULL) -+ return -ENOMEM; -+ } -+ if (use_ext_header) { -+ msg->msg = (char *)&msg->hdr[1]; -+ msg->hdr->command_offset = sizeof(msg->hdr[0]); -+ } else { -+ msg->msg = (char *)msg->hdr; -+ } -+ msg->channel = &dxgglobal->channel; -+ return 0; -+} -+ -+static void free_message(struct dxgvmbusmsg *msg, struct dxgprocess *process) -+{ -+ if (msg->hdr && (char *)msg->hdr != msg->msg_on_stack) -+ vfree(msg->hdr); -+} -+ -+/* -+ * Helper functions -+ */ -+ -+int ntstatus2int(struct ntstatus status) -+{ -+ if (NT_SUCCESS(status)) -+ return (int)status.v; -+ switch (status.v) { -+ case STATUS_OBJECT_NAME_COLLISION: -+ return -EEXIST; -+ case STATUS_NO_MEMORY: -+ return -ENOMEM; -+ case STATUS_INVALID_PARAMETER: -+ return -EINVAL; -+ case STATUS_OBJECT_NAME_INVALID: -+ case STATUS_OBJECT_NAME_NOT_FOUND: -+ return -ENOENT; -+ case STATUS_TIMEOUT: -+ return -EAGAIN; -+ case STATUS_BUFFER_TOO_SMALL: -+ return -EOVERFLOW; -+ case STATUS_DEVICE_REMOVED: -+ return -ENODEV; -+ case STATUS_ACCESS_DENIED: -+ return -EACCES; -+ case STATUS_NOT_SUPPORTED: -+ return -EPERM; -+ case STATUS_ILLEGAL_INSTRUCTION: -+ return -EOPNOTSUPP; -+ case STATUS_INVALID_HANDLE: -+ return -EBADF; -+ case STATUS_GRAPHICS_ALLOCATION_BUSY: -+ return -EINPROGRESS; -+ case STATUS_OBJECT_TYPE_MISMATCH: -+ return -EPROTOTYPE; -+ case STATUS_NOT_IMPLEMENTED: -+ return -EPERM; -+ default: -+ return -EINVAL; -+ } -+} -+ - int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev) - { - int ret; -@@ -86,7 +201,210 @@ void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch) - } - } - -+static void command_vm_to_host_init1(struct dxgkvmb_command_vm_to_host *command, -+ enum dxgkvmb_commandtype_global type) -+{ 
-+ command->command_type = type; -+ command->process.v = 0; -+ command->command_id = 0; -+ command->channel_type = DXGKVMB_VM_TO_HOST; -+} -+ -+static void process_inband_packet(struct dxgvmbuschannel *channel, -+ struct vmpacket_descriptor *desc) -+{ -+ u32 packet_length = hv_pkt_datalen(desc); -+ struct dxgkvmb_command_host_to_vm *packet; -+ -+ if (packet_length < sizeof(struct dxgkvmb_command_host_to_vm)) { -+ DXG_ERR("Invalid global packet"); -+ } else { -+ packet = hv_pkt_data(desc); -+ DXG_TRACE("global packet %d", -+ packet->command_type); -+ switch (packet->command_type) { -+ case DXGK_VMBCOMMAND_SIGNALGUESTEVENT: -+ case DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE: -+ break; -+ case DXGK_VMBCOMMAND_SENDWNFNOTIFICATION: -+ break; -+ default: -+ DXG_ERR("unexpected host message %d", -+ packet->command_type); -+ } -+ } -+} -+ -+static void process_completion_packet(struct dxgvmbuschannel *channel, -+ struct vmpacket_descriptor *desc) -+{ -+ struct dxgvmbuspacket *packet = NULL; -+ struct dxgvmbuspacket *entry; -+ u32 packet_length = hv_pkt_datalen(desc); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&channel->packet_list_mutex, flags); -+ list_for_each_entry(entry, &channel->packet_list_head, -+ packet_list_entry) { -+ if (desc->trans_id == entry->request_id) { -+ packet = entry; -+ list_del(&packet->packet_list_entry); -+ packet->completed = true; -+ break; -+ } -+ } -+ spin_unlock_irqrestore(&channel->packet_list_mutex, flags); -+ if (packet) { -+ if (packet->buffer_length) { -+ if (packet_length < packet->buffer_length) { -+ DXG_TRACE("invalid size %d Expected:%d", -+ packet_length, -+ packet->buffer_length); -+ packet->status = -EOVERFLOW; -+ } else { -+ memcpy(packet->buffer, hv_pkt_data(desc), -+ packet->buffer_length); -+ } -+ } -+ complete(&packet->wait); -+ } else { -+ DXG_ERR("did not find packet to complete"); -+ } -+} -+ - /* Receive callback for messages from the host */ - void dxgvmbuschannel_receive(void *ctx) - { -+ struct dxgvmbuschannel 
*channel = ctx; -+ struct vmpacket_descriptor *desc; -+ u32 packet_length = 0; -+ -+ foreach_vmbus_pkt(desc, channel->channel) { -+ packet_length = hv_pkt_datalen(desc); -+ DXG_TRACE("next packet (id, size, type): %llu %d %d", -+ desc->trans_id, packet_length, desc->type); -+ if (desc->type == VM_PKT_COMP) { -+ process_completion_packet(channel, desc); -+ } else { -+ if (desc->type != VM_PKT_DATA_INBAND) -+ DXG_ERR("unexpected packet type"); -+ else -+ process_inband_packet(channel, desc); -+ } -+ } -+} -+ -+int dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel, -+ void *command, -+ u32 cmd_size, -+ void *result, -+ u32 result_size) -+{ -+ int ret; -+ struct dxgvmbuspacket *packet = NULL; -+ -+ if (cmd_size > DXG_MAX_VM_BUS_PACKET_SIZE || -+ result_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("%s invalid data size", __func__); -+ return -EINVAL; -+ } -+ -+ packet = kmem_cache_alloc(channel->packet_cache, 0); -+ if (packet == NULL) { -+ DXG_ERR("kmem_cache_alloc failed"); -+ return -ENOMEM; -+ } -+ -+ packet->request_id = atomic64_inc_return(&channel->packet_request_id); -+ init_completion(&packet->wait); -+ packet->buffer = result; -+ packet->buffer_length = result_size; -+ packet->status = 0; -+ packet->completed = false; -+ spin_lock_irq(&channel->packet_list_mutex); -+ list_add_tail(&packet->packet_list_entry, &channel->packet_list_head); -+ spin_unlock_irq(&channel->packet_list_mutex); -+ -+ ret = vmbus_sendpacket(channel->channel, command, cmd_size, -+ packet->request_id, VM_PKT_DATA_INBAND, -+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); -+ if (ret) { -+ DXG_ERR("vmbus_sendpacket failed: %x", ret); -+ spin_lock_irq(&channel->packet_list_mutex); -+ list_del(&packet->packet_list_entry); -+ spin_unlock_irq(&channel->packet_list_mutex); -+ goto cleanup; -+ } -+ -+ DXG_TRACE("waiting completion: %llu", packet->request_id); -+ ret = wait_for_completion_killable(&packet->wait); -+ if (ret) { -+ DXG_ERR("wait_for_completion failed: %x", ret); -+ 
spin_lock_irq(&channel->packet_list_mutex); -+ if (!packet->completed) -+ list_del(&packet->packet_list_entry); -+ spin_unlock_irq(&channel->packet_list_mutex); -+ goto cleanup; -+ } -+ DXG_TRACE("completion done: %llu %x", -+ packet->request_id, packet->status); -+ ret = packet->status; -+ -+cleanup: -+ -+ kmem_cache_free(channel->packet_cache, packet); -+ if (ret < 0) -+ DXG_TRACE("Error: %x", ret); -+ return ret; -+} -+ -+static int -+dxgvmb_send_sync_msg_ntstatus(struct dxgvmbuschannel *channel, -+ void *command, u32 cmd_size) -+{ -+ struct ntstatus status; -+ int ret; -+ -+ ret = dxgvmb_send_sync_msg(channel, command, cmd_size, -+ &status, sizeof(status)); -+ if (ret >= 0) -+ ret = ntstatus2int(status); -+ return ret; -+} -+ -+/* -+ * Global messages to the host -+ */ -+ -+int dxgvmb_send_set_iospace_region(u64 start, u64 len) -+{ -+ int ret; -+ struct dxgkvmb_command_setiospaceregion *command; -+ struct dxgvmbusmsg msg; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, NULL, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ command_vm_to_host_init1(&command->hdr, -+ DXGK_VMBCOMMAND_SETIOSPACEREGION); -+ command->start = start; -+ command->length = len; -+ ret = dxgvmb_send_sync_msg_ntstatus(&dxgglobal->channel, msg.hdr, -+ msg.size); -+ if (ret < 0) -+ DXG_ERR("send_set_iospace_region failed %x", ret); -+ -+ dxgglobal_release_channel_lock(); -+cleanup: -+ free_message(&msg, NULL); -+ if (ret) -+ DXG_TRACE("Error: %d", ret); -+ return ret; - } -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -16,4 +16,71 @@ - - #define DXG_MAX_VM_BUS_PACKET_SIZE (1024 * 128) - -+enum dxgkvmb_commandchanneltype { -+ DXGKVMB_VGPU_TO_HOST, -+ DXGKVMB_VM_TO_HOST, -+ DXGKVMB_HOST_TO_VM -+}; -+ -+/* -+ * -+ 
* Commands, sent to the host via the guest global VM bus channel -+ * DXG_GUEST_GLOBAL_VMBUS -+ * -+ */ -+ -+enum dxgkvmb_commandtype_global { -+ DXGK_VMBCOMMAND_VM_TO_HOST_FIRST = 1000, -+ DXGK_VMBCOMMAND_CREATEPROCESS = DXGK_VMBCOMMAND_VM_TO_HOST_FIRST, -+ DXGK_VMBCOMMAND_DESTROYPROCESS = 1001, -+ DXGK_VMBCOMMAND_OPENSYNCOBJECT = 1002, -+ DXGK_VMBCOMMAND_DESTROYSYNCOBJECT = 1003, -+ DXGK_VMBCOMMAND_CREATENTSHAREDOBJECT = 1004, -+ DXGK_VMBCOMMAND_DESTROYNTSHAREDOBJECT = 1005, -+ DXGK_VMBCOMMAND_SIGNALFENCE = 1006, -+ DXGK_VMBCOMMAND_NOTIFYPROCESSFREEZE = 1007, -+ DXGK_VMBCOMMAND_NOTIFYPROCESSTHAW = 1008, -+ DXGK_VMBCOMMAND_QUERYETWSESSION = 1009, -+ DXGK_VMBCOMMAND_SETIOSPACEREGION = 1010, -+ DXGK_VMBCOMMAND_COMPLETETRANSACTION = 1011, -+ DXGK_VMBCOMMAND_SHAREOBJECTWITHHOST = 1021, -+ DXGK_VMBCOMMAND_INVALID_VM_TO_HOST -+}; -+ -+/* -+ * Commands, sent by the host to the VM -+ */ -+enum dxgkvmb_commandtype_host_to_vm { -+ DXGK_VMBCOMMAND_SIGNALGUESTEVENT, -+ DXGK_VMBCOMMAND_PROPAGATEPRESENTHISTORYTOKEN, -+ DXGK_VMBCOMMAND_SETGUESTDATA, -+ DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE, -+ DXGK_VMBCOMMAND_SENDWNFNOTIFICATION, -+ DXGK_VMBCOMMAND_INVALID_HOST_TO_VM -+}; -+ -+struct dxgkvmb_command_vm_to_host { -+ u64 command_id; -+ struct d3dkmthandle process; -+ enum dxgkvmb_commandchanneltype channel_type; -+ enum dxgkvmb_commandtype_global command_type; -+}; -+ -+struct dxgkvmb_command_host_to_vm { -+ u64 command_id; -+ struct d3dkmthandle process; -+ u32 channel_type : 8; -+ u32 async_msg : 1; -+ u32 reserved : 23; -+ enum dxgkvmb_commandtype_host_to_vm command_type; -+}; -+ -+/* Returns ntstatus */ -+struct dxgkvmb_command_setiospaceregion { -+ struct dxgkvmb_command_vm_to_host hdr; -+ u64 start; -+ u64 length; -+ u32 shared_page_gpadl; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -0,0 +1,24 @@ -+// 
SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Ioctl implementation -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "dxgkrnl.h" -+#include "dxgvmbus.h" -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -0,0 +1,72 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Misc definitions -+ * -+ */ -+ -+#ifndef _MISC_H_ -+#define _MISC_H_ -+ -+extern const struct d3dkmthandle zerohandle; -+ -+/* -+ * Synchronization lock hierarchy. -+ * -+ * The higher enum value, the higher is the lock order. -+ * When a lower lock ois held, the higher lock should not be acquired. -+ * -+ * channel_lock -+ * device_mutex -+ */ -+ -+/* -+ * Some of the Windows return codes, which needs to be translated to Linux -+ * IOCTL return codes. Positive values are success codes and need to be -+ * returned from the driver IOCTLs. libdxcore.so depends on returning -+ * specific return codes. 
-+ */ -+#define STATUS_SUCCESS ((int)(0)) -+#define STATUS_OBJECT_NAME_INVALID ((int)(0xC0000033L)) -+#define STATUS_DEVICE_REMOVED ((int)(0xC00002B6L)) -+#define STATUS_INVALID_HANDLE ((int)(0xC0000008L)) -+#define STATUS_ILLEGAL_INSTRUCTION ((int)(0xC000001DL)) -+#define STATUS_NOT_IMPLEMENTED ((int)(0xC0000002L)) -+#define STATUS_PENDING ((int)(0x00000103L)) -+#define STATUS_ACCESS_DENIED ((int)(0xC0000022L)) -+#define STATUS_BUFFER_TOO_SMALL ((int)(0xC0000023L)) -+#define STATUS_OBJECT_TYPE_MISMATCH ((int)(0xC0000024L)) -+#define STATUS_GRAPHICS_ALLOCATION_BUSY ((int)(0xC01E0102L)) -+#define STATUS_NOT_SUPPORTED ((int)(0xC00000BBL)) -+#define STATUS_TIMEOUT ((int)(0x00000102L)) -+#define STATUS_INVALID_PARAMETER ((int)(0xC000000DL)) -+#define STATUS_NO_MEMORY ((int)(0xC0000017L)) -+#define STATUS_OBJECT_NAME_COLLISION ((int)(0xC0000035L)) -+#define STATUS_OBJECT_NAME_NOT_FOUND ((int)(0xC0000034L)) -+ -+ -+#define NT_SUCCESS(status) (status.v >= 0) -+ -+#ifndef DEBUG -+ -+#define DXGKRNL_ASSERT(exp) -+ -+#else -+ -+#define DXGKRNL_ASSERT(exp) \ -+do { \ -+ if (!(exp)) { \ -+ dump_stack(); \ -+ BUG_ON(true); \ -+ } \ -+} while (0) -+ -+#endif /* DEBUG */ -+ -+#endif /* _MISC_H_ */ -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -14,6 +14,40 @@ - #ifndef _D3DKMTHK_H - #define _D3DKMTHK_H - -+/* -+ * This structure matches the definition of D3DKMTHANDLE in Windows. -+ * The handle is opaque in user mode. It is used by user mode applications to -+ * represent kernel mode objects, created by dxgkrnl. -+ */ -+struct d3dkmthandle { -+ union { -+ struct { -+ __u32 instance : 6; -+ __u32 index : 24; -+ __u32 unique : 2; -+ }; -+ __u32 v; -+ }; -+}; -+ -+/* -+ * VM bus messages return Windows' NTSTATUS, which is integer and only negative -+ * value indicates a failure. 
A positive number is a success and needs to be -+ * returned to user mode as the IOCTL return code. Negative status codes are -+ * converted to Linux error codes. -+ */ -+struct ntstatus { -+ union { -+ struct { -+ int code : 16; -+ int facility : 13; -+ int customer : 1; -+ int severity : 2; -+ }; -+ int v; -+ }; -+}; -+ - /* - * Matches the Windows LUID definition. - * LUID is a locally unique identifier (similar to GUID, but not global), --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1670-drivers-hv-dxgkrnl-Creation-of-dxgadapter-object.patch b/patch/kernel/archive/wsl2-arm64-6.6/1670-drivers-hv-dxgkrnl-Creation-of-dxgadapter-object.patch deleted file mode 100644 index 90df8654ad1b..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1670-drivers-hv-dxgkrnl-Creation-of-dxgadapter-object.patch +++ /dev/null @@ -1,1160 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 15 Feb 2022 19:00:38 -0800 -Subject: drivers: hv: dxgkrnl: Creation of dxgadapter object - -Handle creation and destruction of dxgadapter object, which -represents a virtual compute device, projected to the VM by -the host. The dxgadapter object is created when the -corresponding VMBus channel is offered by Hyper-V. - -There could be multiple virtual compute device objects, projected -by the host to VM. They are enumerated by issuing IOCTLs to -the /dev/dxg device. - -The adapter object can start functioning only when the global VMBus -channel and the corresponding per device VMBus channel are -initialized. Notifications about arrival of a virtual compute PCI -device and VMBus channels can happen in any order. Therefore, -the initial dxgadapter object state is DXGADAPTER_STATE_WAITING_VMBUS. -A list of VMBus channels and a list of waiting dxgadapter objects -are maintained. When dxgkrnl is notified about a VMBus channel -arrival, if tries to start all adapters, which are not started yet. 
- -Properties of the adapter object are determined by sending VMBus -messages to the host to the corresponding VMBus channel. - -When the per virtual compute device VMBus channel or the global -channel are destroyed, the adapter object is destroyed. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/Makefile | 2 +- - drivers/hv/dxgkrnl/dxgadapter.c | 170 ++++++++ - drivers/hv/dxgkrnl/dxgkrnl.h | 85 ++++ - drivers/hv/dxgkrnl/dxgmodule.c | 204 ++++++++- - drivers/hv/dxgkrnl/dxgvmbus.c | 217 +++++++++- - drivers/hv/dxgkrnl/dxgvmbus.h | 128 ++++++ - drivers/hv/dxgkrnl/misc.c | 37 ++ - drivers/hv/dxgkrnl/misc.h | 24 +- - 8 files changed, 844 insertions(+), 23 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/Makefile b/drivers/hv/dxgkrnl/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/Makefile -+++ b/drivers/hv/dxgkrnl/Makefile -@@ -2,4 +2,4 @@ - # Makefile for the hyper-v compute device driver (dxgkrnl). - - obj-$(CONFIG_DXGKRNL) += dxgkrnl.o --dxgkrnl-y := dxgmodule.o dxgvmbus.o -+dxgkrnl-y := dxgmodule.o misc.o dxgadapter.o ioctl.o dxgvmbus.o -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -0,0 +1,170 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. 
-+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Implementation of dxgadapter and its objects -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "dxgkrnl.h" -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -+ -+int dxgadapter_set_vmbus(struct dxgadapter *adapter, struct hv_device *hdev) -+{ -+ int ret; -+ -+ guid_to_luid(&hdev->channel->offermsg.offer.if_instance, -+ &adapter->luid); -+ DXG_TRACE("%x:%x %p %pUb", -+ adapter->luid.b, adapter->luid.a, hdev->channel, -+ &hdev->channel->offermsg.offer.if_instance); -+ -+ ret = dxgvmbuschannel_init(&adapter->channel, hdev); -+ if (ret) -+ goto cleanup; -+ -+ adapter->channel.adapter = adapter; -+ adapter->hv_dev = hdev; -+ -+ ret = dxgvmb_send_open_adapter(adapter); -+ if (ret < 0) { -+ DXG_ERR("dxgvmb_send_open_adapter failed: %d", ret); -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_get_internal_adapter_info(adapter); -+ -+cleanup: -+ if (ret) -+ DXG_ERR("Failed to set vmbus: %d", ret); -+ return ret; -+} -+ -+void dxgadapter_start(struct dxgadapter *adapter) -+{ -+ struct dxgvgpuchannel *ch = NULL; -+ struct dxgvgpuchannel *entry; -+ int ret; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ DXG_TRACE("%x-%x", adapter->luid.a, adapter->luid.b); -+ -+ /* Find the corresponding vGPU vm bus channel */ -+ list_for_each_entry(entry, &dxgglobal->vgpu_ch_list_head, -+ vgpu_ch_list_entry) { -+ if (memcmp(&adapter->luid, -+ &entry->adapter_luid, -+ sizeof(struct winluid)) == 0) { -+ ch = entry; -+ break; -+ } -+ } -+ if (ch == NULL) { -+ DXG_TRACE("vGPU chanel is not ready"); -+ return; -+ } -+ -+ /* The global channel is initialized when the first adapter starts */ -+ if (!dxgglobal->global_channel_initialized) { -+ ret = dxgglobal_init_global_channel(); -+ if (ret) { -+ dxgglobal_destroy_global_channel(); -+ return; -+ } -+ dxgglobal->global_channel_initialized = true; -+ } -+ -+ /* Initialize vGPU vm bus channel */ -+ ret = dxgadapter_set_vmbus(adapter, ch->hdev); -+ if 
(ret) { -+ DXG_ERR("Failed to start adapter %p", adapter); -+ adapter->adapter_state = DXGADAPTER_STATE_STOPPED; -+ return; -+ } -+ -+ adapter->adapter_state = DXGADAPTER_STATE_ACTIVE; -+ DXG_TRACE("Adapter started %p", adapter); -+} -+ -+void dxgadapter_stop(struct dxgadapter *adapter) -+{ -+ bool adapter_stopped = false; -+ -+ down_write(&adapter->core_lock); -+ if (!adapter->stopping_adapter) -+ adapter->stopping_adapter = true; -+ else -+ adapter_stopped = true; -+ up_write(&adapter->core_lock); -+ -+ if (adapter_stopped) -+ return; -+ -+ if (dxgadapter_acquire_lock_exclusive(adapter) == 0) { -+ dxgvmb_send_close_adapter(adapter); -+ dxgadapter_release_lock_exclusive(adapter); -+ } -+ dxgvmbuschannel_destroy(&adapter->channel); -+ -+ adapter->adapter_state = DXGADAPTER_STATE_STOPPED; -+} -+ -+void dxgadapter_release(struct kref *refcount) -+{ -+ struct dxgadapter *adapter; -+ -+ adapter = container_of(refcount, struct dxgadapter, adapter_kref); -+ DXG_TRACE("%p", adapter); -+ kfree(adapter); -+} -+ -+bool dxgadapter_is_active(struct dxgadapter *adapter) -+{ -+ return adapter->adapter_state == DXGADAPTER_STATE_ACTIVE; -+} -+ -+int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter) -+{ -+ down_write(&adapter->core_lock); -+ if (adapter->adapter_state != DXGADAPTER_STATE_ACTIVE) { -+ dxgadapter_release_lock_exclusive(adapter); -+ return -ENODEV; -+ } -+ return 0; -+} -+ -+void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter) -+{ -+ down_write(&adapter->core_lock); -+} -+ -+void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter) -+{ -+ up_write(&adapter->core_lock); -+} -+ -+int dxgadapter_acquire_lock_shared(struct dxgadapter *adapter) -+{ -+ down_read(&adapter->core_lock); -+ if (adapter->adapter_state == DXGADAPTER_STATE_ACTIVE) -+ return 0; -+ dxgadapter_release_lock_shared(adapter); -+ return -ENODEV; -+} -+ -+void dxgadapter_release_lock_shared(struct dxgadapter *adapter) -+{ -+ up_read(&adapter->core_lock); -+} -diff --git 
a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -47,9 +47,39 @@ extern struct dxgdriver dxgdrv; - - #define DXGDEV dxgdrv.dxgdev - -+struct dxgk_device_types { -+ u32 post_device:1; -+ u32 post_device_certain:1; -+ u32 software_device:1; -+ u32 soft_gpu_device:1; -+ u32 warp_device:1; -+ u32 bdd_device:1; -+ u32 support_miracast:1; -+ u32 mismatched_lda:1; -+ u32 indirect_display_device:1; -+ u32 xbox_one_device:1; -+ u32 child_id_support_dwm_clone:1; -+ u32 child_id_support_dwm_clone2:1; -+ u32 has_internal_panel:1; -+ u32 rfx_vgpu_device:1; -+ u32 virtual_render_device:1; -+ u32 support_preserve_boot_display:1; -+ u32 is_uefi_frame_buffer:1; -+ u32 removable_device:1; -+ u32 virtual_monitor_device:1; -+}; -+ -+enum dxgobjectstate { -+ DXGOBJECTSTATE_CREATED, -+ DXGOBJECTSTATE_ACTIVE, -+ DXGOBJECTSTATE_STOPPED, -+ DXGOBJECTSTATE_DESTROYED, -+}; -+ - struct dxgvmbuschannel { - struct vmbus_channel *channel; - struct hv_device *hdev; -+ struct dxgadapter *adapter; - spinlock_t packet_list_mutex; - struct list_head packet_list_head; - struct kmem_cache *packet_cache; -@@ -81,6 +111,10 @@ struct dxgglobal { - struct miscdevice dxgdevice; - struct mutex device_mutex; - -+ /* list of created adapters */ -+ struct list_head adapter_list_head; -+ struct rw_semaphore adapter_list_lock; -+ - /* - * List of the vGPU VM bus channels (dxgvgpuchannel) - * Protected by device_mutex -@@ -102,6 +136,10 @@ static inline struct dxgglobal *dxggbl(void) - return dxgdrv.dxgglobal; - } - -+int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, -+ struct winluid host_vgpu_luid); -+void dxgglobal_acquire_adapter_list_lock(enum dxglockstate state); -+void dxgglobal_release_adapter_list_lock(enum dxglockstate state); - int dxgglobal_init_global_channel(void); - void dxgglobal_destroy_global_channel(void); - struct vmbus_channel 
*dxgglobal_get_vmbus(void); -@@ -113,6 +151,47 @@ struct dxgprocess { - /* Placeholder */ - }; - -+enum dxgadapter_state { -+ DXGADAPTER_STATE_ACTIVE = 0, -+ DXGADAPTER_STATE_STOPPED = 1, -+ DXGADAPTER_STATE_WAITING_VMBUS = 2, -+}; -+ -+/* -+ * This object represents the grapchis adapter. -+ * Objects, which take reference on the adapter: -+ * - dxgglobal -+ * - adapter handle (struct d3dkmthandle) -+ */ -+struct dxgadapter { -+ struct rw_semaphore core_lock; -+ struct kref adapter_kref; -+ /* Entry in the list of adapters in dxgglobal */ -+ struct list_head adapter_list_entry; -+ struct pci_dev *pci_dev; -+ struct hv_device *hv_dev; -+ struct dxgvmbuschannel channel; -+ struct d3dkmthandle host_handle; -+ enum dxgadapter_state adapter_state; -+ struct winluid host_adapter_luid; -+ struct winluid host_vgpu_luid; -+ struct winluid luid; /* VM bus channel luid */ -+ u16 device_description[80]; -+ u16 device_instance_id[WIN_MAX_PATH]; -+ bool stopping_adapter; -+}; -+ -+int dxgadapter_set_vmbus(struct dxgadapter *adapter, struct hv_device *hdev); -+bool dxgadapter_is_active(struct dxgadapter *adapter); -+void dxgadapter_start(struct dxgadapter *adapter); -+void dxgadapter_stop(struct dxgadapter *adapter); -+void dxgadapter_release(struct kref *refcount); -+int dxgadapter_acquire_lock_shared(struct dxgadapter *adapter); -+void dxgadapter_release_lock_shared(struct dxgadapter *adapter); -+int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter); -+void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter); -+void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter); -+ - /* - * The convention is that VNBus instance id is a GUID, but the host sets - * the lower part of the value to the host adapter LUID. 
The function -@@ -141,6 +220,12 @@ static inline void guid_to_luid(guid_t *guid, struct winluid *luid) - - void dxgvmb_initialize(void); - int dxgvmb_send_set_iospace_region(u64 start, u64 len); -+int dxgvmb_send_open_adapter(struct dxgadapter *adapter); -+int dxgvmb_send_close_adapter(struct dxgadapter *adapter); -+int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter); -+int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel, -+ void *command, -+ u32 cmd_size); - - int ntstatus2int(struct ntstatus status); - -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -55,6 +55,156 @@ void dxgglobal_release_channel_lock(void) - up_read(&dxggbl()->channel_lock); - } - -+void dxgglobal_acquire_adapter_list_lock(enum dxglockstate state) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (state == DXGLOCK_EXCL) -+ down_write(&dxgglobal->adapter_list_lock); -+ else -+ down_read(&dxgglobal->adapter_list_lock); -+} -+ -+void dxgglobal_release_adapter_list_lock(enum dxglockstate state) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (state == DXGLOCK_EXCL) -+ up_write(&dxgglobal->adapter_list_lock); -+ else -+ up_read(&dxgglobal->adapter_list_lock); -+} -+ -+/* -+ * Returns a pointer to dxgadapter object, which corresponds to the given PCI -+ * device, or NULL. 
-+ */ -+static struct dxgadapter *find_pci_adapter(struct pci_dev *dev) -+{ -+ struct dxgadapter *entry; -+ struct dxgadapter *adapter = NULL; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL); -+ -+ list_for_each_entry(entry, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (dev == entry->pci_dev) { -+ adapter = entry; -+ break; -+ } -+ } -+ -+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); -+ return adapter; -+} -+ -+/* -+ * Returns a pointer to dxgadapter object, which has the givel LUID -+ * device, or NULL. -+ */ -+static struct dxgadapter *find_adapter(struct winluid *luid) -+{ -+ struct dxgadapter *entry; -+ struct dxgadapter *adapter = NULL; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL); -+ -+ list_for_each_entry(entry, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (memcmp(luid, &entry->luid, sizeof(struct winluid)) == 0) { -+ adapter = entry; -+ break; -+ } -+ } -+ -+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); -+ return adapter; -+} -+ -+/* -+ * Creates a new dxgadapter object, which represents a virtual GPU, projected -+ * by the host. -+ * The adapter is in the waiting state. It will become active when the global -+ * VM bus channel and the adapter VM bus channel are created. 
-+ */ -+int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, -+ struct winluid host_vgpu_luid) -+{ -+ struct dxgadapter *adapter; -+ int ret = 0; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ adapter = kzalloc(sizeof(struct dxgadapter), GFP_KERNEL); -+ if (adapter == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ adapter->adapter_state = DXGADAPTER_STATE_WAITING_VMBUS; -+ adapter->host_vgpu_luid = host_vgpu_luid; -+ kref_init(&adapter->adapter_kref); -+ init_rwsem(&adapter->core_lock); -+ -+ adapter->pci_dev = dev; -+ guid_to_luid(guid, &adapter->luid); -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL); -+ -+ list_add_tail(&adapter->adapter_list_entry, -+ &dxgglobal->adapter_list_head); -+ dxgglobal->num_adapters++; -+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); -+ -+ DXG_TRACE("new adapter added %p %x-%x", adapter, -+ adapter->luid.a, adapter->luid.b); -+cleanup: -+ return ret; -+} -+ -+/* -+ * Attempts to start dxgadapter objects, which are not active yet. -+ */ -+static void dxgglobal_start_adapters(void) -+{ -+ struct dxgadapter *adapter; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (dxgglobal->hdev == NULL) { -+ DXG_TRACE("Global channel is not ready"); -+ return; -+ } -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL); -+ list_for_each_entry(adapter, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (adapter->adapter_state == DXGADAPTER_STATE_WAITING_VMBUS) -+ dxgadapter_start(adapter); -+ } -+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); -+} -+ -+/* -+ * Stopsthe active dxgadapter objects. 
-+ */ -+static void dxgglobal_stop_adapters(void) -+{ -+ struct dxgadapter *adapter; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (dxgglobal->hdev == NULL) { -+ DXG_TRACE("Global channel is not ready"); -+ return; -+ } -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL); -+ list_for_each_entry(adapter, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (adapter->adapter_state == DXGADAPTER_STATE_ACTIVE) -+ dxgadapter_stop(adapter); -+ } -+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); -+} -+ - const struct file_operations dxgk_fops = { - .owner = THIS_MODULE, - }; -@@ -182,6 +332,15 @@ static int dxg_pci_probe_device(struct pci_dev *dev, - DXG_TRACE("Vmbus interface version: %d", dxgglobal->vmbus_ver); - DXG_TRACE("Host luid: %x-%x", vgpu_luid.b, vgpu_luid.a); - -+ /* Create new virtual GPU adapter */ -+ ret = dxgglobal_create_adapter(dev, &guid, vgpu_luid); -+ if (ret) -+ goto cleanup; -+ -+ /* Attempt to start the adapter in case VM bus channels are created */ -+ -+ dxgglobal_start_adapters(); -+ - cleanup: - - mutex_unlock(&dxgglobal->device_mutex); -@@ -193,7 +352,25 @@ static int dxg_pci_probe_device(struct pci_dev *dev, - - static void dxg_pci_remove_device(struct pci_dev *dev) - { -- /* Placeholder */ -+ struct dxgadapter *adapter; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ mutex_lock(&dxgglobal->device_mutex); -+ -+ adapter = find_pci_adapter(dev); -+ if (adapter) { -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_EXCL); -+ list_del(&adapter->adapter_list_entry); -+ dxgglobal->num_adapters--; -+ dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); -+ -+ dxgadapter_stop(adapter); -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ } else { -+ DXG_ERR("Failed to find dxgadapter for pcidev"); -+ } -+ -+ mutex_unlock(&dxgglobal->device_mutex); - } - - static struct pci_device_id dxg_pci_id_table[] = { -@@ -297,6 +474,25 @@ void dxgglobal_destroy_global_channel(void) - up_write(&dxgglobal->channel_lock); - } - -+static void 
dxgglobal_stop_adapter_vmbus(struct hv_device *hdev) -+{ -+ struct dxgadapter *adapter = NULL; -+ struct winluid luid; -+ -+ guid_to_luid(&hdev->channel->offermsg.offer.if_instance, &luid); -+ -+ DXG_TRACE("Stopping adapter %x:%x", luid.b, luid.a); -+ -+ adapter = find_adapter(&luid); -+ -+ if (adapter && adapter->adapter_state == DXGADAPTER_STATE_ACTIVE) { -+ down_write(&adapter->core_lock); -+ dxgvmbuschannel_destroy(&adapter->channel); -+ adapter->adapter_state = DXGADAPTER_STATE_STOPPED; -+ up_write(&adapter->core_lock); -+ } -+} -+ - static const struct hv_vmbus_device_id dxg_vmbus_id_table[] = { - /* Per GPU Device GUID */ - { HV_GPUP_DXGK_VGPU_GUID }, -@@ -329,6 +525,7 @@ static int dxg_probe_vmbus(struct hv_device *hdev, - vgpuch->hdev = hdev; - list_add_tail(&vgpuch->vgpu_ch_list_entry, - &dxgglobal->vgpu_ch_list_head); -+ dxgglobal_start_adapters(); - } else if (uuid_le_cmp(hdev->dev_type, - dxg_vmbus_id_table[1].guid) == 0) { - /* This is the global Dxgkgnl channel */ -@@ -341,6 +538,7 @@ static int dxg_probe_vmbus(struct hv_device *hdev, - goto error; - } - dxgglobal->hdev = hdev; -+ dxgglobal_start_adapters(); - } else { - /* Unknown device type */ - DXG_ERR("Unknown VM bus device type"); -@@ -364,6 +562,7 @@ static int dxg_remove_vmbus(struct hv_device *hdev) - - if (uuid_le_cmp(hdev->dev_type, dxg_vmbus_id_table[0].guid) == 0) { - DXG_TRACE("Remove virtual GPU channel"); -+ dxgglobal_stop_adapter_vmbus(hdev); - list_for_each_entry(vgpu_channel, - &dxgglobal->vgpu_ch_list_head, - vgpu_ch_list_entry) { -@@ -420,6 +619,8 @@ static struct dxgglobal *dxgglobal_create(void) - mutex_init(&dxgglobal->device_mutex); - - INIT_LIST_HEAD(&dxgglobal->vgpu_ch_list_head); -+ INIT_LIST_HEAD(&dxgglobal->adapter_list_head); -+ init_rwsem(&dxgglobal->adapter_list_lock); - - init_rwsem(&dxgglobal->channel_lock); - -@@ -430,6 +631,7 @@ static void dxgglobal_destroy(struct dxgglobal *dxgglobal) - { - if (dxgglobal) { - mutex_lock(&dxgglobal->device_mutex); -+ 
dxgglobal_stop_adapters(); - dxgglobal_destroy_global_channel(); - mutex_unlock(&dxgglobal->device_mutex); - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -77,7 +77,7 @@ struct dxgvmbusmsgres { - void *res; - }; - --static int init_message(struct dxgvmbusmsg *msg, -+static int init_message(struct dxgvmbusmsg *msg, struct dxgadapter *adapter, - struct dxgprocess *process, u32 size) - { - struct dxgglobal *dxgglobal = dxggbl(); -@@ -99,10 +99,15 @@ static int init_message(struct dxgvmbusmsg *msg, - if (use_ext_header) { - msg->msg = (char *)&msg->hdr[1]; - msg->hdr->command_offset = sizeof(msg->hdr[0]); -+ if (adapter) -+ msg->hdr->vgpu_luid = adapter->host_vgpu_luid; - } else { - msg->msg = (char *)msg->hdr; - } -- msg->channel = &dxgglobal->channel; -+ if (adapter && !dxgglobal->async_msg_enabled) -+ msg->channel = &adapter->channel; -+ else -+ msg->channel = &dxgglobal->channel; - return 0; - } - -@@ -116,6 +121,37 @@ static void free_message(struct dxgvmbusmsg *msg, struct dxgprocess *process) - * Helper functions - */ - -+static void command_vm_to_host_init2(struct dxgkvmb_command_vm_to_host *command, -+ enum dxgkvmb_commandtype_global t, -+ struct d3dkmthandle process) -+{ -+ command->command_type = t; -+ command->process = process; -+ command->command_id = 0; -+ command->channel_type = DXGKVMB_VM_TO_HOST; -+} -+ -+static void command_vgpu_to_host_init1(struct dxgkvmb_command_vgpu_to_host -+ *command, -+ enum dxgkvmb_commandtype type) -+{ -+ command->command_type = type; -+ command->process.v = 0; -+ command->command_id = 0; -+ command->channel_type = DXGKVMB_VGPU_TO_HOST; -+} -+ -+static void command_vgpu_to_host_init2(struct dxgkvmb_command_vgpu_to_host -+ *command, -+ enum dxgkvmb_commandtype type, -+ struct d3dkmthandle process) -+{ -+ command->command_type = type; -+ command->process = process; -+ 
command->command_id = 0; -+ command->channel_type = DXGKVMB_VGPU_TO_HOST; -+} -+ - int ntstatus2int(struct ntstatus status) - { - if (NT_SUCCESS(status)) -@@ -216,22 +252,26 @@ static void process_inband_packet(struct dxgvmbuschannel *channel, - u32 packet_length = hv_pkt_datalen(desc); - struct dxgkvmb_command_host_to_vm *packet; - -- if (packet_length < sizeof(struct dxgkvmb_command_host_to_vm)) { -- DXG_ERR("Invalid global packet"); -- } else { -- packet = hv_pkt_data(desc); -- DXG_TRACE("global packet %d", -- packet->command_type); -- switch (packet->command_type) { -- case DXGK_VMBCOMMAND_SIGNALGUESTEVENT: -- case DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE: -- break; -- case DXGK_VMBCOMMAND_SENDWNFNOTIFICATION: -- break; -- default: -- DXG_ERR("unexpected host message %d", -+ if (channel->adapter == NULL) { -+ if (packet_length < sizeof(struct dxgkvmb_command_host_to_vm)) { -+ DXG_ERR("Invalid global packet"); -+ } else { -+ packet = hv_pkt_data(desc); -+ DXG_TRACE("global packet %d", - packet->command_type); -+ switch (packet->command_type) { -+ case DXGK_VMBCOMMAND_SIGNALGUESTEVENT: -+ case DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE: -+ break; -+ case DXGK_VMBCOMMAND_SENDWNFNOTIFICATION: -+ break; -+ default: -+ DXG_ERR("unexpected host message %d", -+ packet->command_type); -+ } - } -+ } else { -+ DXG_ERR("Unexpected packet for adapter channel"); - } - } - -@@ -279,6 +319,7 @@ void dxgvmbuschannel_receive(void *ctx) - struct vmpacket_descriptor *desc; - u32 packet_length = 0; - -+ DXG_TRACE("New adapter message: %p", channel->adapter); - foreach_vmbus_pkt(desc, channel->channel) { - packet_length = hv_pkt_datalen(desc); - DXG_TRACE("next packet (id, size, type): %llu %d %d", -@@ -302,6 +343,8 @@ int dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel, - { - int ret; - struct dxgvmbuspacket *packet = NULL; -+ struct dxgkvmb_command_vm_to_host *cmd1; -+ struct dxgkvmb_command_vgpu_to_host *cmd2; - - if (cmd_size > DXG_MAX_VM_BUS_PACKET_SIZE || - result_size > 
DXG_MAX_VM_BUS_PACKET_SIZE) { -@@ -315,6 +358,16 @@ int dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel, - return -ENOMEM; - } - -+ if (channel->adapter == NULL) { -+ cmd1 = command; -+ DXG_TRACE("send_sync_msg global: %d %p %d %d", -+ cmd1->command_type, command, cmd_size, result_size); -+ } else { -+ cmd2 = command; -+ DXG_TRACE("send_sync_msg adapter: %d %p %d %d", -+ cmd2->command_type, command, cmd_size, result_size); -+ } -+ - packet->request_id = atomic64_inc_return(&channel->packet_request_id); - init_completion(&packet->wait); - packet->buffer = result; -@@ -358,6 +411,41 @@ int dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel, - return ret; - } - -+int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel, -+ void *command, -+ u32 cmd_size) -+{ -+ int ret; -+ int try_count = 0; -+ -+ if (cmd_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("%s invalid data size", __func__); -+ return -EINVAL; -+ } -+ -+ if (channel->adapter) { -+ DXG_ERR("Async message sent to the adapter channel"); -+ return -EINVAL; -+ } -+ -+ do { -+ ret = vmbus_sendpacket(channel->channel, command, cmd_size, -+ 0, VM_PKT_DATA_INBAND, 0); -+ /* -+ * -EAGAIN is returned when the VM bus ring buffer if full. -+ * Wait 2ms to allow the host to process messages and try again. 
-+ */ -+ if (ret == -EAGAIN) { -+ usleep_range(1000, 2000); -+ try_count++; -+ } -+ } while (ret == -EAGAIN && try_count < 5000); -+ if (ret < 0) -+ DXG_ERR("vmbus_sendpacket failed: %x", ret); -+ -+ return ret; -+} -+ - static int - dxgvmb_send_sync_msg_ntstatus(struct dxgvmbuschannel *channel, - void *command, u32 cmd_size) -@@ -383,7 +471,7 @@ int dxgvmb_send_set_iospace_region(u64 start, u64 len) - struct dxgvmbusmsg msg; - struct dxgglobal *dxgglobal = dxggbl(); - -- ret = init_message(&msg, NULL, sizeof(*command)); -+ ret = init_message(&msg, NULL, NULL, sizeof(*command)); - if (ret) - return ret; - command = (void *)msg.msg; -@@ -408,3 +496,98 @@ int dxgvmb_send_set_iospace_region(u64 start, u64 len) - DXG_TRACE("Error: %d", ret); - return ret; - } -+ -+/* -+ * Virtual GPU messages to the host -+ */ -+ -+int dxgvmb_send_open_adapter(struct dxgadapter *adapter) -+{ -+ int ret; -+ struct dxgkvmb_command_openadapter *command; -+ struct dxgkvmb_command_openadapter_return result = { }; -+ struct dxgvmbusmsg msg; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, adapter, NULL, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init1(&command->hdr, DXGK_VMBCOMMAND_OPENADAPTER); -+ command->vmbus_interface_version = dxgglobal->vmbus_ver; -+ command->vmbus_last_compatible_interface_version = -+ DXGK_VMBUS_LAST_COMPATIBLE_INTERFACE_VERSION; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result.status); -+ adapter->host_handle = result.host_adapter_handle; -+ -+cleanup: -+ free_message(&msg, NULL); -+ if (ret) -+ DXG_ERR("Failed to open adapter: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_close_adapter(struct dxgadapter *adapter) -+{ -+ int ret; -+ struct dxgkvmb_command_closeadapter *command; -+ struct dxgvmbusmsg msg; -+ -+ ret = init_message(&msg, adapter, NULL, sizeof(*command)); 
-+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init1(&command->hdr, DXGK_VMBCOMMAND_CLOSEADAPTER); -+ command->host_handle = adapter->host_handle; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ NULL, 0); -+ free_message(&msg, NULL); -+ if (ret) -+ DXG_ERR("Failed to close adapter: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter) -+{ -+ int ret; -+ struct dxgkvmb_command_getinternaladapterinfo *command; -+ struct dxgkvmb_command_getinternaladapterinfo_return result = { }; -+ struct dxgvmbusmsg msg; -+ u32 result_size = sizeof(result); -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, adapter, NULL, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init1(&command->hdr, -+ DXGK_VMBCOMMAND_GETINTERNALADAPTERINFO); -+ if (dxgglobal->vmbus_ver < DXGK_VMBUS_INTERFACE_VERSION) -+ result_size -= sizeof(struct winluid); -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, result_size); -+ if (ret >= 0) { -+ adapter->host_adapter_luid = result.host_adapter_luid; -+ adapter->host_vgpu_luid = result.host_vgpu_luid; -+ wcsncpy(adapter->device_description, result.device_description, -+ sizeof(adapter->device_description) / sizeof(u16)); -+ wcsncpy(adapter->device_instance_id, result.device_instance_id, -+ sizeof(adapter->device_instance_id) / sizeof(u16)); -+ dxgglobal->async_msg_enabled = result.async_msg_enabled != 0; -+ } -+ free_message(&msg, NULL); -+ if (ret) -+ DXG_ERR("Failed to get adapter info: %d", ret); -+ return ret; -+} -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -47,6 +47,83 @@ enum dxgkvmb_commandtype_global { - DXGK_VMBCOMMAND_INVALID_VM_TO_HOST - }; - -+/* -+ * -+ * Commands, sent to the host via the 
per adapter VM bus channel -+ * DXG_GUEST_VGPU_VMBUS -+ * -+ */ -+ -+enum dxgkvmb_commandtype { -+ DXGK_VMBCOMMAND_CREATEDEVICE = 0, -+ DXGK_VMBCOMMAND_DESTROYDEVICE = 1, -+ DXGK_VMBCOMMAND_QUERYADAPTERINFO = 2, -+ DXGK_VMBCOMMAND_DDIQUERYADAPTERINFO = 3, -+ DXGK_VMBCOMMAND_CREATEALLOCATION = 4, -+ DXGK_VMBCOMMAND_DESTROYALLOCATION = 5, -+ DXGK_VMBCOMMAND_CREATECONTEXTVIRTUAL = 6, -+ DXGK_VMBCOMMAND_DESTROYCONTEXT = 7, -+ DXGK_VMBCOMMAND_CREATESYNCOBJECT = 8, -+ DXGK_VMBCOMMAND_CREATEPAGINGQUEUE = 9, -+ DXGK_VMBCOMMAND_DESTROYPAGINGQUEUE = 10, -+ DXGK_VMBCOMMAND_MAKERESIDENT = 11, -+ DXGK_VMBCOMMAND_EVICT = 12, -+ DXGK_VMBCOMMAND_ESCAPE = 13, -+ DXGK_VMBCOMMAND_OPENADAPTER = 14, -+ DXGK_VMBCOMMAND_CLOSEADAPTER = 15, -+ DXGK_VMBCOMMAND_FREEGPUVIRTUALADDRESS = 16, -+ DXGK_VMBCOMMAND_MAPGPUVIRTUALADDRESS = 17, -+ DXGK_VMBCOMMAND_RESERVEGPUVIRTUALADDRESS = 18, -+ DXGK_VMBCOMMAND_UPDATEGPUVIRTUALADDRESS = 19, -+ DXGK_VMBCOMMAND_SUBMITCOMMAND = 20, -+ dxgk_vmbcommand_queryvideomemoryinfo = 21, -+ DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMCPU = 22, -+ DXGK_VMBCOMMAND_LOCK2 = 23, -+ DXGK_VMBCOMMAND_UNLOCK2 = 24, -+ DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMGPU = 25, -+ DXGK_VMBCOMMAND_SIGNALSYNCOBJECT = 26, -+ DXGK_VMBCOMMAND_SIGNALFENCENTSHAREDBYREF = 27, -+ DXGK_VMBCOMMAND_GETDEVICESTATE = 28, -+ DXGK_VMBCOMMAND_MARKDEVICEASERROR = 29, -+ DXGK_VMBCOMMAND_ADAPTERSTOP = 30, -+ DXGK_VMBCOMMAND_SETQUEUEDLIMIT = 31, -+ DXGK_VMBCOMMAND_OPENRESOURCE = 32, -+ DXGK_VMBCOMMAND_SETCONTEXTSCHEDULINGPRIORITY = 33, -+ DXGK_VMBCOMMAND_PRESENTHISTORYTOKEN = 34, -+ DXGK_VMBCOMMAND_SETREDIRECTEDFLIPFENCEVALUE = 35, -+ DXGK_VMBCOMMAND_GETINTERNALADAPTERINFO = 36, -+ DXGK_VMBCOMMAND_FLUSHHEAPTRANSITIONS = 37, -+ DXGK_VMBCOMMAND_BLT = 38, -+ DXGK_VMBCOMMAND_DDIGETSTANDARDALLOCATIONDRIVERDATA = 39, -+ DXGK_VMBCOMMAND_CDDGDICOMMAND = 40, -+ DXGK_VMBCOMMAND_QUERYALLOCATIONRESIDENCY = 41, -+ DXGK_VMBCOMMAND_FLUSHDEVICE = 42, -+ DXGK_VMBCOMMAND_FLUSHADAPTER = 43, -+ DXGK_VMBCOMMAND_DDIGETNODEMETADATA = 44, 
-+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE = 45, -+ DXGK_VMBCOMMAND_ISSYNCOBJECTSIGNALED = 46, -+ DXGK_VMBCOMMAND_CDDSYNCGPUACCESS = 47, -+ DXGK_VMBCOMMAND_QUERYSTATISTICS = 48, -+ DXGK_VMBCOMMAND_CHANGEVIDEOMEMORYRESERVATION = 49, -+ DXGK_VMBCOMMAND_CREATEHWQUEUE = 50, -+ DXGK_VMBCOMMAND_DESTROYHWQUEUE = 51, -+ DXGK_VMBCOMMAND_SUBMITCOMMANDTOHWQUEUE = 52, -+ DXGK_VMBCOMMAND_GETDRIVERSTOREFILE = 53, -+ DXGK_VMBCOMMAND_READDRIVERSTOREFILE = 54, -+ DXGK_VMBCOMMAND_GETNEXTHARDLINK = 55, -+ DXGK_VMBCOMMAND_UPDATEALLOCATIONPROPERTY = 56, -+ DXGK_VMBCOMMAND_OFFERALLOCATIONS = 57, -+ DXGK_VMBCOMMAND_RECLAIMALLOCATIONS = 58, -+ DXGK_VMBCOMMAND_SETALLOCATIONPRIORITY = 59, -+ DXGK_VMBCOMMAND_GETALLOCATIONPRIORITY = 60, -+ DXGK_VMBCOMMAND_GETCONTEXTSCHEDULINGPRIORITY = 61, -+ DXGK_VMBCOMMAND_QUERYCLOCKCALIBRATION = 62, -+ DXGK_VMBCOMMAND_QUERYRESOURCEINFO = 64, -+ DXGK_VMBCOMMAND_LOGEVENT = 65, -+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMPAGES = 66, -+ DXGK_VMBCOMMAND_INVALID -+}; -+ - /* - * Commands, sent by the host to the VM - */ -@@ -66,6 +143,15 @@ struct dxgkvmb_command_vm_to_host { - enum dxgkvmb_commandtype_global command_type; - }; - -+struct dxgkvmb_command_vgpu_to_host { -+ u64 command_id; -+ struct d3dkmthandle process; -+ u32 channel_type : 8; -+ u32 async_msg : 1; -+ u32 reserved : 23; -+ enum dxgkvmb_commandtype command_type; -+}; -+ - struct dxgkvmb_command_host_to_vm { - u64 command_id; - struct d3dkmthandle process; -@@ -83,4 +169,46 @@ struct dxgkvmb_command_setiospaceregion { - u32 shared_page_gpadl; - }; - -+struct dxgkvmb_command_openadapter { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ u32 vmbus_interface_version; -+ u32 vmbus_last_compatible_interface_version; -+ struct winluid guest_adapter_luid; -+}; -+ -+struct dxgkvmb_command_openadapter_return { -+ struct d3dkmthandle host_adapter_handle; -+ struct ntstatus status; -+ u32 vmbus_interface_version; -+ u32 vmbus_last_compatible_interface_version; -+}; -+ -+struct dxgkvmb_command_closeadapter { -+ struct 
dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle host_handle; -+}; -+ -+struct dxgkvmb_command_getinternaladapterinfo { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+}; -+ -+struct dxgkvmb_command_getinternaladapterinfo_return { -+ struct dxgk_device_types device_types; -+ u32 driver_store_copy_mode; -+ u32 driver_ddi_version; -+ u32 secure_virtual_machine : 1; -+ u32 virtual_machine_reset : 1; -+ u32 is_vail_supported : 1; -+ u32 hw_sch_enabled : 1; -+ u32 hw_sch_capable : 1; -+ u32 va_backed_vm : 1; -+ u32 async_msg_enabled : 1; -+ u32 hw_support_state : 2; -+ u32 reserved : 23; -+ struct winluid host_adapter_luid; -+ u16 device_description[80]; -+ u16 device_instance_id[WIN_MAX_PATH]; -+ struct winluid host_vgpu_luid; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/misc.c b/drivers/hv/dxgkrnl/misc.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/misc.c -@@ -0,0 +1,37 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2019, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Helper functions -+ * -+ */ -+ -+#include -+#include -+#include -+ -+#include "dxgkrnl.h" -+#include "misc.h" -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -+ -+u16 *wcsncpy(u16 *dest, const u16 *src, size_t n) -+{ -+ int i; -+ -+ for (i = 0; i < n; i++) { -+ dest[i] = src[i]; -+ if (src[i] == 0) { -+ i++; -+ break; -+ } -+ } -+ dest[i - 1] = 0; -+ return dest; -+} -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -14,18 +14,34 @@ - #ifndef _MISC_H_ - #define _MISC_H_ - -+/* Max characters in Windows path */ -+#define WIN_MAX_PATH 260 -+ - extern const struct d3dkmthandle zerohandle; - - /* - * Synchronization lock hierarchy. - * -- * The higher enum value, the higher is the lock order. 
-- * When a lower lock ois held, the higher lock should not be acquired. -+ * The locks here are in the order from lowest to highest. -+ * When a lower lock is held, the higher lock should not be acquired. - * -- * channel_lock -- * device_mutex -+ * channel_lock (VMBus channel lock) -+ * fd_mutex -+ * plistmutex (process list mutex) -+ * table_lock (handle table lock) -+ * core_lock (dxgadapter lock) -+ * device_lock (dxgdevice lock) -+ * adapter_list_lock -+ * device_mutex (dxgglobal mutex) - */ - -+u16 *wcsncpy(u16 *dest, const u16 *src, size_t n); -+ -+enum dxglockstate { -+ DXGLOCK_SHARED, -+ DXGLOCK_EXCL -+}; -+ - /* - * Some of the Windows return codes, which needs to be translated to Linux - * IOCTL return codes. Positive values are success codes and need to be --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1671-drivers-hv-dxgkrnl-Opening-of-dev-dxg-device-and-dxgprocess-creation.patch b/patch/kernel/archive/wsl2-arm64-6.6/1671-drivers-hv-dxgkrnl-Opening-of-dev-dxg-device-and-dxgprocess-creation.patch deleted file mode 100644 index b1de83540952..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1671-drivers-hv-dxgkrnl-Opening-of-dev-dxg-device-and-dxgprocess-creation.patch +++ /dev/null @@ -1,1847 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 15 Feb 2022 19:12:48 -0800 -Subject: drivers: hv: dxgkrnl: Opening of /dev/dxg device and dxgprocess - creation - -- Implement opening of the device (/dev/dxg) file object and creation of -dxgprocess objects. - -- Add VM bus messages to create and destroy the host side of a dxgprocess -object. - -- Implement the handle manager, which manages d3dkmthandle handles -for the internal process objects. The handles are used by a user mode -client to reference dxgkrnl objects. - -dxgprocess is created for each process, which opens /dev/dxg. 
-dxgprocess is ref counted, so the existing dxgprocess objects is used -for a process, which opens the device object multiple time. -dxgprocess is destroyed when the file object is released. - -A corresponding dxgprocess object is created on the host for every -dxgprocess object in the guest. - -When a dxgkrnl object is created, in most cases the corresponding -object is created in the host. The VM references the host objects by -handles (d3dkmthandle). d3dkmthandle values for a host object and -the corresponding VM object are the same. A host handle is allocated -first and its value is assigned to the guest object. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/Makefile | 2 +- - drivers/hv/dxgkrnl/dxgadapter.c | 72 ++ - drivers/hv/dxgkrnl/dxgkrnl.h | 95 +- - drivers/hv/dxgkrnl/dxgmodule.c | 97 ++ - drivers/hv/dxgkrnl/dxgprocess.c | 262 +++++ - drivers/hv/dxgkrnl/dxgvmbus.c | 164 +++ - drivers/hv/dxgkrnl/dxgvmbus.h | 36 + - drivers/hv/dxgkrnl/hmgr.c | 563 ++++++++++ - drivers/hv/dxgkrnl/hmgr.h | 112 ++ - drivers/hv/dxgkrnl/ioctl.c | 60 + - drivers/hv/dxgkrnl/misc.h | 9 +- - include/uapi/misc/d3dkmthk.h | 103 ++ - 12 files changed, 1569 insertions(+), 6 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/Makefile b/drivers/hv/dxgkrnl/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/Makefile -+++ b/drivers/hv/dxgkrnl/Makefile -@@ -2,4 +2,4 @@ - # Makefile for the hyper-v compute device driver (dxgkrnl). 
- - obj-$(CONFIG_DXGKRNL) += dxgkrnl.o --dxgkrnl-y := dxgmodule.o misc.o dxgadapter.o ioctl.o dxgvmbus.o -+dxgkrnl-y := dxgmodule.o hmgr.o misc.o dxgadapter.o ioctl.o dxgvmbus.o dxgprocess.o -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -100,6 +100,7 @@ void dxgadapter_start(struct dxgadapter *adapter) - - void dxgadapter_stop(struct dxgadapter *adapter) - { -+ struct dxgprocess_adapter *entry; - bool adapter_stopped = false; - - down_write(&adapter->core_lock); -@@ -112,6 +113,15 @@ void dxgadapter_stop(struct dxgadapter *adapter) - if (adapter_stopped) - return; - -+ dxgglobal_acquire_process_adapter_lock(); -+ -+ list_for_each_entry(entry, &adapter->adapter_process_list_head, -+ adapter_process_list_entry) { -+ dxgprocess_adapter_stop(entry); -+ } -+ -+ dxgglobal_release_process_adapter_lock(); -+ - if (dxgadapter_acquire_lock_exclusive(adapter) == 0) { - dxgvmb_send_close_adapter(adapter); - dxgadapter_release_lock_exclusive(adapter); -@@ -135,6 +145,21 @@ bool dxgadapter_is_active(struct dxgadapter *adapter) - return adapter->adapter_state == DXGADAPTER_STATE_ACTIVE; - } - -+/* Protected by dxgglobal_acquire_process_adapter_lock */ -+void dxgadapter_add_process(struct dxgadapter *adapter, -+ struct dxgprocess_adapter *process_info) -+{ -+ DXG_TRACE("%p %p", adapter, process_info); -+ list_add_tail(&process_info->adapter_process_list_entry, -+ &adapter->adapter_process_list_head); -+} -+ -+void dxgadapter_remove_process(struct dxgprocess_adapter *process_info) -+{ -+ DXG_TRACE("%p %p", process_info->adapter, process_info); -+ list_del(&process_info->adapter_process_list_entry); -+} -+ - int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter) - { - down_write(&adapter->core_lock); -@@ -168,3 +193,50 @@ void dxgadapter_release_lock_shared(struct dxgadapter *adapter) - { - 
up_read(&adapter->core_lock); - } -+ -+struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, -+ struct dxgadapter *adapter) -+{ -+ struct dxgprocess_adapter *adapter_info; -+ -+ adapter_info = kzalloc(sizeof(*adapter_info), GFP_KERNEL); -+ if (adapter_info) { -+ if (kref_get_unless_zero(&adapter->adapter_kref) == 0) { -+ DXG_ERR("failed to acquire adapter reference"); -+ goto cleanup; -+ } -+ adapter_info->adapter = adapter; -+ adapter_info->process = process; -+ adapter_info->refcount = 1; -+ list_add_tail(&adapter_info->process_adapter_list_entry, -+ &process->process_adapter_list_head); -+ dxgadapter_add_process(adapter, adapter_info); -+ } -+ return adapter_info; -+cleanup: -+ if (adapter_info) -+ kfree(adapter_info); -+ return NULL; -+} -+ -+void dxgprocess_adapter_stop(struct dxgprocess_adapter *adapter_info) -+{ -+} -+ -+void dxgprocess_adapter_destroy(struct dxgprocess_adapter *adapter_info) -+{ -+ dxgadapter_remove_process(adapter_info); -+ kref_put(&adapter_info->adapter->adapter_kref, dxgadapter_release); -+ list_del(&adapter_info->process_adapter_list_entry); -+ kfree(adapter_info); -+} -+ -+/* -+ * Must be called when dxgglobal::process_adapter_mutex is held -+ */ -+void dxgprocess_adapter_release(struct dxgprocess_adapter *adapter_info) -+{ -+ adapter_info->refcount--; -+ if (adapter_info->refcount == 0) -+ dxgprocess_adapter_destroy(adapter_info); -+} -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -29,8 +29,10 @@ - #include - #include - #include "misc.h" -+#include "hmgr.h" - #include - -+struct dxgprocess; - struct dxgadapter; - - /* -@@ -111,6 +113,10 @@ struct dxgglobal { - struct miscdevice dxgdevice; - struct mutex device_mutex; - -+ /* list of created processes */ -+ struct list_head plisthead; -+ struct mutex plistmutex; -+ - /* list of created adapters */ - struct list_head 
adapter_list_head; - struct rw_semaphore adapter_list_lock; -@@ -124,6 +130,9 @@ struct dxgglobal { - /* protects acces to the global VM bus channel */ - struct rw_semaphore channel_lock; - -+ /* protects the dxgprocess_adapter lists */ -+ struct mutex process_adapter_mutex; -+ - bool global_channel_initialized; - bool async_msg_enabled; - bool misc_registered; -@@ -144,13 +153,84 @@ int dxgglobal_init_global_channel(void); - void dxgglobal_destroy_global_channel(void); - struct vmbus_channel *dxgglobal_get_vmbus(void); - struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void); -+void dxgglobal_acquire_process_adapter_lock(void); -+void dxgglobal_release_process_adapter_lock(void); - int dxgglobal_acquire_channel_lock(void); - void dxgglobal_release_channel_lock(void); - -+/* -+ * Describes adapter information for each process -+ */ -+struct dxgprocess_adapter { -+ /* Entry in dxgadapter::adapter_process_list_head */ -+ struct list_head adapter_process_list_entry; -+ /* Entry in dxgprocess::process_adapter_list_head */ -+ struct list_head process_adapter_list_entry; -+ struct dxgadapter *adapter; -+ struct dxgprocess *process; -+ int refcount; -+}; -+ -+struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, -+ struct dxgadapter -+ *adapter); -+void dxgprocess_adapter_release(struct dxgprocess_adapter *adapter); -+void dxgprocess_adapter_stop(struct dxgprocess_adapter *adapter_info); -+void dxgprocess_adapter_destroy(struct dxgprocess_adapter *adapter_info); -+ -+/* -+ * The structure represents a process, which opened the /dev/dxg device. -+ * A corresponding object is created on the host. -+ */ - struct dxgprocess { -- /* Placeholder */ -+ /* -+ * Process list entry in dxgglobal. -+ * Protected by the dxgglobal->plistmutex. 
-+ */ -+ struct list_head plistentry; -+ pid_t pid; -+ pid_t tgid; -+ /* how many time the process was opened */ -+ struct kref process_kref; -+ /* -+ * This handle table is used for all objects except dxgadapter -+ * The handle table lock order is higher than the local_handle_table -+ * lock -+ */ -+ struct hmgrtable handle_table; -+ /* -+ * This handle table is used for dxgadapter objects. -+ * The handle table lock order is lowest. -+ */ -+ struct hmgrtable local_handle_table; -+ /* Handle of the corresponding objec on the host */ -+ struct d3dkmthandle host_handle; -+ -+ /* List of opened adapters (dxgprocess_adapter) */ -+ struct list_head process_adapter_list_head; - }; - -+struct dxgprocess *dxgprocess_create(void); -+void dxgprocess_destroy(struct dxgprocess *process); -+void dxgprocess_release(struct kref *refcount); -+int dxgprocess_open_adapter(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle *handle); -+int dxgprocess_close_adapter(struct dxgprocess *process, -+ struct d3dkmthandle handle); -+struct dxgadapter *dxgprocess_get_adapter(struct dxgprocess *process, -+ struct d3dkmthandle handle); -+struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process, -+ struct d3dkmthandle handle); -+void dxgprocess_ht_lock_shared_down(struct dxgprocess *process); -+void dxgprocess_ht_lock_shared_up(struct dxgprocess *process); -+void dxgprocess_ht_lock_exclusive_down(struct dxgprocess *process); -+void dxgprocess_ht_lock_exclusive_up(struct dxgprocess *process); -+struct dxgprocess_adapter *dxgprocess_get_adapter_info(struct dxgprocess -+ *process, -+ struct dxgadapter -+ *adapter); -+ - enum dxgadapter_state { - DXGADAPTER_STATE_ACTIVE = 0, - DXGADAPTER_STATE_STOPPED = 1, -@@ -168,6 +248,8 @@ struct dxgadapter { - struct kref adapter_kref; - /* Entry in the list of adapters in dxgglobal */ - struct list_head adapter_list_entry; -+ /* The list of dxgprocess_adapter entries */ -+ struct list_head 
adapter_process_list_head; - struct pci_dev *pci_dev; - struct hv_device *hv_dev; - struct dxgvmbuschannel channel; -@@ -191,6 +273,12 @@ void dxgadapter_release_lock_shared(struct dxgadapter *adapter); - int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter); - void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter); - void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter); -+void dxgadapter_add_process(struct dxgadapter *adapter, -+ struct dxgprocess_adapter *process_info); -+void dxgadapter_remove_process(struct dxgprocess_adapter *process_info); -+ -+long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2); -+long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2); - - /* - * The convention is that VNBus instance id is a GUID, but the host sets -@@ -220,9 +308,14 @@ static inline void guid_to_luid(guid_t *guid, struct winluid *luid) - - void dxgvmb_initialize(void); - int dxgvmb_send_set_iospace_region(u64 start, u64 len); -+int dxgvmb_send_create_process(struct dxgprocess *process); -+int dxgvmb_send_destroy_process(struct d3dkmthandle process); - int dxgvmb_send_open_adapter(struct dxgadapter *adapter); - int dxgvmb_send_close_adapter(struct dxgadapter *adapter); - int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter); -+int dxgvmb_send_query_adapter_info(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryadapterinfo *args); - int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel, - void *command, - u32 cmd_size); -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -123,6 +123,20 @@ static struct dxgadapter *find_adapter(struct winluid *luid) - return adapter; - } - -+void dxgglobal_acquire_process_adapter_lock(void) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ 
mutex_lock(&dxgglobal->process_adapter_mutex); -+} -+ -+void dxgglobal_release_process_adapter_lock(void) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ mutex_unlock(&dxgglobal->process_adapter_mutex); -+} -+ - /* - * Creates a new dxgadapter object, which represents a virtual GPU, projected - * by the host. -@@ -147,6 +161,7 @@ int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, - kref_init(&adapter->adapter_kref); - init_rwsem(&adapter->core_lock); - -+ INIT_LIST_HEAD(&adapter->adapter_process_list_head); - adapter->pci_dev = dev; - guid_to_luid(guid, &adapter->luid); - -@@ -205,8 +220,87 @@ static void dxgglobal_stop_adapters(void) - dxgglobal_release_adapter_list_lock(DXGLOCK_EXCL); - } - -+/* -+ * Returns dxgprocess for the current executing process. -+ * Creates dxgprocess if it doesn't exist. -+ */ -+static struct dxgprocess *dxgglobal_get_current_process(void) -+{ -+ /* -+ * Find the DXG process for the current process. -+ * A new process is created if necessary. 
-+ */ -+ struct dxgprocess *process = NULL; -+ struct dxgprocess *entry = NULL; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ mutex_lock(&dxgglobal->plistmutex); -+ list_for_each_entry(entry, &dxgglobal->plisthead, plistentry) { -+ /* All threads of a process have the same thread group ID */ -+ if (entry->tgid == current->tgid) { -+ if (kref_get_unless_zero(&entry->process_kref)) { -+ process = entry; -+ DXG_TRACE("found dxgprocess"); -+ } else { -+ DXG_TRACE("process is destroyed"); -+ } -+ break; -+ } -+ } -+ mutex_unlock(&dxgglobal->plistmutex); -+ -+ if (process == NULL) -+ process = dxgprocess_create(); -+ -+ return process; -+} -+ -+/* -+ * File operations for the /dev/dxg device -+ */ -+ -+static int dxgk_open(struct inode *n, struct file *f) -+{ -+ int ret = 0; -+ struct dxgprocess *process; -+ -+ DXG_TRACE("%p %d %d", f, current->pid, current->tgid); -+ -+ /* Find/create a dxgprocess structure for this process */ -+ process = dxgglobal_get_current_process(); -+ -+ if (process) { -+ f->private_data = process; -+ } else { -+ DXG_TRACE("cannot create dxgprocess"); -+ ret = -EBADF; -+ } -+ -+ return ret; -+} -+ -+static int dxgk_release(struct inode *n, struct file *f) -+{ -+ struct dxgprocess *process; -+ -+ process = (struct dxgprocess *)f->private_data; -+ DXG_TRACE("%p, %p", f, process); -+ -+ if (process == NULL) -+ return -EINVAL; -+ -+ kref_put(&process->process_kref, dxgprocess_release); -+ -+ f->private_data = NULL; -+ return 0; -+} -+ - const struct file_operations dxgk_fops = { - .owner = THIS_MODULE, -+ .open = dxgk_open, -+ .release = dxgk_release, -+ .compat_ioctl = dxgk_compat_ioctl, -+ .unlocked_ioctl = dxgk_unlocked_ioctl, - }; - - /* -@@ -616,7 +710,10 @@ static struct dxgglobal *dxgglobal_create(void) - if (!dxgglobal) - return NULL; - -+ INIT_LIST_HEAD(&dxgglobal->plisthead); -+ mutex_init(&dxgglobal->plistmutex); - mutex_init(&dxgglobal->device_mutex); -+ mutex_init(&dxgglobal->process_adapter_mutex); - - 
INIT_LIST_HEAD(&dxgglobal->vgpu_ch_list_head); - INIT_LIST_HEAD(&dxgglobal->adapter_list_head); -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -0,0 +1,262 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * DXGPROCESS implementation -+ * -+ */ -+ -+#include "dxgkrnl.h" -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -+ -+/* -+ * Creates a new dxgprocess object -+ * Must be called when dxgglobal->plistmutex is held -+ */ -+struct dxgprocess *dxgprocess_create(void) -+{ -+ struct dxgprocess *process; -+ int ret; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ process = kzalloc(sizeof(struct dxgprocess), GFP_KERNEL); -+ if (process != NULL) { -+ DXG_TRACE("new dxgprocess created"); -+ process->pid = current->pid; -+ process->tgid = current->tgid; -+ ret = dxgvmb_send_create_process(process); -+ if (ret < 0) { -+ DXG_TRACE("send_create_process failed"); -+ kfree(process); -+ process = NULL; -+ } else { -+ INIT_LIST_HEAD(&process->plistentry); -+ kref_init(&process->process_kref); -+ -+ mutex_lock(&dxgglobal->plistmutex); -+ list_add_tail(&process->plistentry, -+ &dxgglobal->plisthead); -+ mutex_unlock(&dxgglobal->plistmutex); -+ -+ hmgrtable_init(&process->handle_table, process); -+ hmgrtable_init(&process->local_handle_table, process); -+ INIT_LIST_HEAD(&process->process_adapter_list_head); -+ } -+ } -+ return process; -+} -+ -+void dxgprocess_destroy(struct dxgprocess *process) -+{ -+ int i; -+ enum hmgrentry_type t; -+ struct d3dkmthandle h; -+ void *o; -+ struct dxgprocess_adapter *entry; -+ struct dxgprocess_adapter *tmp; -+ -+ /* Destroy all adapter state */ -+ dxgglobal_acquire_process_adapter_lock(); -+ list_for_each_entry_safe(entry, tmp, -+ &process->process_adapter_list_head, -+ 
process_adapter_list_entry) { -+ dxgprocess_adapter_destroy(entry); -+ } -+ dxgglobal_release_process_adapter_lock(); -+ -+ i = 0; -+ while (hmgrtable_next_entry(&process->local_handle_table, -+ &i, &t, &h, &o)) { -+ switch (t) { -+ case HMGRENTRY_TYPE_DXGADAPTER: -+ dxgprocess_close_adapter(process, h); -+ break; -+ default: -+ DXG_ERR("invalid entry in handle table %d", t); -+ break; -+ } -+ } -+ -+ hmgrtable_destroy(&process->handle_table); -+ hmgrtable_destroy(&process->local_handle_table); -+} -+ -+void dxgprocess_release(struct kref *refcount) -+{ -+ struct dxgprocess *process; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ process = container_of(refcount, struct dxgprocess, process_kref); -+ -+ mutex_lock(&dxgglobal->plistmutex); -+ list_del(&process->plistentry); -+ mutex_unlock(&dxgglobal->plistmutex); -+ -+ dxgprocess_destroy(process); -+ -+ if (process->host_handle.v) -+ dxgvmb_send_destroy_process(process->host_handle); -+ kfree(process); -+} -+ -+struct dxgprocess_adapter *dxgprocess_get_adapter_info(struct dxgprocess -+ *process, -+ struct dxgadapter -+ *adapter) -+{ -+ struct dxgprocess_adapter *entry; -+ -+ list_for_each_entry(entry, &process->process_adapter_list_head, -+ process_adapter_list_entry) { -+ if (adapter == entry->adapter) { -+ DXG_TRACE("Found process info %p", entry); -+ return entry; -+ } -+ } -+ return NULL; -+} -+ -+/* -+ * Dxgprocess takes references on dxgadapter and dxgprocess_adapter. -+ * -+ * The process_adapter lock is held. 
-+ * -+ */ -+int dxgprocess_open_adapter(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle *h) -+{ -+ int ret = 0; -+ struct dxgprocess_adapter *adapter_info; -+ struct d3dkmthandle handle; -+ -+ h->v = 0; -+ adapter_info = dxgprocess_get_adapter_info(process, adapter); -+ if (adapter_info == NULL) { -+ DXG_TRACE("creating new process adapter info"); -+ adapter_info = dxgprocess_adapter_create(process, adapter); -+ if (adapter_info == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ } else { -+ adapter_info->refcount++; -+ } -+ -+ handle = hmgrtable_alloc_handle_safe(&process->local_handle_table, -+ adapter, HMGRENTRY_TYPE_DXGADAPTER, -+ true); -+ if (handle.v) { -+ *h = handle; -+ } else { -+ DXG_ERR("failed to create adapter handle"); -+ ret = -ENOMEM; -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (adapter_info) -+ dxgprocess_adapter_release(adapter_info); -+ } -+ -+ return ret; -+} -+ -+int dxgprocess_close_adapter(struct dxgprocess *process, -+ struct d3dkmthandle handle) -+{ -+ struct dxgadapter *adapter; -+ struct dxgprocess_adapter *adapter_info; -+ int ret = 0; -+ -+ if (handle.v == 0) -+ return 0; -+ -+ hmgrtable_lock(&process->local_handle_table, DXGLOCK_EXCL); -+ adapter = dxgprocess_get_adapter(process, handle); -+ if (adapter) -+ hmgrtable_free_handle(&process->local_handle_table, -+ HMGRENTRY_TYPE_DXGADAPTER, handle); -+ hmgrtable_unlock(&process->local_handle_table, DXGLOCK_EXCL); -+ -+ if (adapter) { -+ adapter_info = dxgprocess_get_adapter_info(process, adapter); -+ if (adapter_info) { -+ dxgglobal_acquire_process_adapter_lock(); -+ dxgprocess_adapter_release(adapter_info); -+ dxgglobal_release_process_adapter_lock(); -+ } else { -+ ret = -EINVAL; -+ } -+ } else { -+ DXG_ERR("Adapter not found %x", handle.v); -+ ret = -EINVAL; -+ } -+ -+ return ret; -+} -+ -+struct dxgadapter *dxgprocess_get_adapter(struct dxgprocess *process, -+ struct d3dkmthandle handle) -+{ -+ struct dxgadapter *adapter; -+ -+ adapter = 
hmgrtable_get_object_by_type(&process->local_handle_table, -+ HMGRENTRY_TYPE_DXGADAPTER, -+ handle); -+ if (adapter == NULL) -+ DXG_ERR("Adapter not found %x", handle.v); -+ return adapter; -+} -+ -+/* -+ * Gets the adapter object from the process handle table. -+ * The adapter object is referenced. -+ * The function acquired the handle table lock shared. -+ */ -+struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process, -+ struct d3dkmthandle handle) -+{ -+ struct dxgadapter *adapter; -+ -+ hmgrtable_lock(&process->local_handle_table, DXGLOCK_SHARED); -+ adapter = hmgrtable_get_object_by_type(&process->local_handle_table, -+ HMGRENTRY_TYPE_DXGADAPTER, -+ handle); -+ if (adapter == NULL) -+ DXG_ERR("adapter_by_handle failed %x", handle.v); -+ else if (kref_get_unless_zero(&adapter->adapter_kref) == 0) { -+ DXG_ERR("failed to acquire adapter reference"); -+ adapter = NULL; -+ } -+ hmgrtable_unlock(&process->local_handle_table, DXGLOCK_SHARED); -+ return adapter; -+} -+ -+void dxgprocess_ht_lock_shared_down(struct dxgprocess *process) -+{ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -+} -+ -+void dxgprocess_ht_lock_shared_up(struct dxgprocess *process) -+{ -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED); -+} -+ -+void dxgprocess_ht_lock_exclusive_down(struct dxgprocess *process) -+{ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+} -+ -+void dxgprocess_ht_lock_exclusive_up(struct dxgprocess *process) -+{ -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+} -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -497,6 +497,87 @@ int dxgvmb_send_set_iospace_region(u64 start, u64 len) - return ret; - } - -+int dxgvmb_send_create_process(struct dxgprocess *process) -+{ -+ int ret; -+ struct dxgkvmb_command_createprocess *command; -+ struct 
dxgkvmb_command_createprocess_return result = { 0 }; -+ struct dxgvmbusmsg msg; -+ char s[WIN_MAX_PATH]; -+ int i; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, NULL, process, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ command_vm_to_host_init1(&command->hdr, DXGK_VMBCOMMAND_CREATEPROCESS); -+ command->process = process; -+ command->process_id = process->pid; -+ command->linux_process = 1; -+ s[0] = 0; -+ __get_task_comm(s, WIN_MAX_PATH, current); -+ for (i = 0; i < WIN_MAX_PATH; i++) { -+ command->process_name[i] = s[i]; -+ if (s[i] == 0) -+ break; -+ } -+ -+ ret = dxgvmb_send_sync_msg(&dxgglobal->channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) { -+ DXG_ERR("create_process failed %d", ret); -+ } else if (result.hprocess.v == 0) { -+ DXG_ERR("create_process returned 0 handle"); -+ ret = -ENOTRECOVERABLE; -+ } else { -+ process->host_handle = result.hprocess; -+ DXG_TRACE("create_process returned %x", -+ process->host_handle.v); -+ } -+ -+ dxgglobal_release_channel_lock(); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_destroy_process(struct d3dkmthandle process) -+{ -+ int ret; -+ struct dxgkvmb_command_destroyprocess *command; -+ struct dxgvmbusmsg msg; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, NULL, NULL, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ command_vm_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_DESTROYPROCESS, -+ process); -+ ret = dxgvmb_send_sync_msg_ntstatus(&dxgglobal->channel, -+ msg.hdr, msg.size); -+ dxgglobal_release_channel_lock(); -+ -+cleanup: -+ free_message(&msg, NULL); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - /* - * Virtual 
GPU messages to the host - */ -@@ -591,3 +672,86 @@ int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter) - DXG_ERR("Failed to get adapter info: %d", ret); - return ret; - } -+ -+int dxgvmb_send_query_adapter_info(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryadapterinfo *args) -+{ -+ struct dxgkvmb_command_queryadapterinfo *command; -+ u32 cmd_size = sizeof(*command) + args->private_data_size - 1; -+ int ret; -+ u32 private_data_size; -+ void *private_data; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ ret = copy_from_user(command->private_data, -+ args->private_data, args->private_data_size); -+ if (ret) { -+ DXG_ERR("Faled to copy private data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_QUERYADAPTERINFO, -+ process->host_handle); -+ command->private_data_size = args->private_data_size; -+ command->query_type = args->type; -+ -+ if (dxgglobal->vmbus_ver >= DXGK_VMBUS_INTERFACE_VERSION) { -+ private_data = msg.msg; -+ private_data_size = command->private_data_size + -+ sizeof(struct ntstatus); -+ } else { -+ private_data = command->private_data; -+ private_data_size = command->private_data_size; -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ private_data, private_data_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ if (dxgglobal->vmbus_ver >= DXGK_VMBUS_INTERFACE_VERSION) { -+ ret = ntstatus2int(*(struct ntstatus *)private_data); -+ if (ret < 0) -+ goto cleanup; -+ private_data = (char *)private_data + sizeof(struct ntstatus); -+ } -+ -+ switch (args->type) { -+ case _KMTQAITYPE_ADAPTERTYPE: -+ case _KMTQAITYPE_ADAPTERTYPE_RENDER: -+ { -+ struct d3dkmt_adaptertype *adapter_type = -+ (void *)private_data; -+ adapter_type->paravirtualized = 1; -+ 
adapter_type->display_supported = 0; -+ adapter_type->post_device = 0; -+ adapter_type->indirect_display_device = 0; -+ adapter_type->acg_supported = 0; -+ adapter_type->support_set_timings_from_vidpn = 0; -+ break; -+ } -+ default: -+ break; -+ } -+ ret = copy_to_user(args->private_data, private_data, -+ args->private_data_size); -+ if (ret) { -+ DXG_ERR("Faled to copy private data to user"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -14,7 +14,11 @@ - #ifndef _DXGVMBUS_H - #define _DXGVMBUS_H - -+struct dxgprocess; -+struct dxgadapter; -+ - #define DXG_MAX_VM_BUS_PACKET_SIZE (1024 * 128) -+#define DXG_VM_PROCESS_NAME_LENGTH 260 - - enum dxgkvmb_commandchanneltype { - DXGKVMB_VGPU_TO_HOST, -@@ -169,6 +173,26 @@ struct dxgkvmb_command_setiospaceregion { - u32 shared_page_gpadl; - }; - -+struct dxgkvmb_command_createprocess { -+ struct dxgkvmb_command_vm_to_host hdr; -+ void *process; -+ u64 process_id; -+ u16 process_name[DXG_VM_PROCESS_NAME_LENGTH + 1]; -+ u8 csrss_process:1; -+ u8 dwm_process:1; -+ u8 wow64_process:1; -+ u8 linux_process:1; -+}; -+ -+struct dxgkvmb_command_createprocess_return { -+ struct d3dkmthandle hprocess; -+}; -+ -+// The command returns ntstatus -+struct dxgkvmb_command_destroyprocess { -+ struct dxgkvmb_command_vm_to_host hdr; -+}; -+ - struct dxgkvmb_command_openadapter { - struct dxgkvmb_command_vgpu_to_host hdr; - u32 vmbus_interface_version; -@@ -211,4 +235,16 @@ struct dxgkvmb_command_getinternaladapterinfo_return { - struct winluid host_vgpu_luid; - }; - -+struct dxgkvmb_command_queryadapterinfo { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ enum kmtqueryadapterinfotype query_type; -+ u32 private_data_size; -+ u8 private_data[1]; -+}; -+ -+struct 
dxgkvmb_command_queryadapterinfo_return { -+ struct ntstatus status; -+ u8 private_data[1]; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/hmgr.c b/drivers/hv/dxgkrnl/hmgr.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/hmgr.c -@@ -0,0 +1,563 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Handle manager implementation -+ * -+ */ -+ -+#include -+#include -+#include -+ -+#include "misc.h" -+#include "dxgkrnl.h" -+#include "hmgr.h" -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "dxgk: " fmt -+ -+const struct d3dkmthandle zerohandle; -+ -+/* -+ * Handle parameters -+ */ -+#define HMGRHANDLE_INSTANCE_BITS 6 -+#define HMGRHANDLE_INDEX_BITS 24 -+#define HMGRHANDLE_UNIQUE_BITS 2 -+ -+#define HMGRHANDLE_INSTANCE_SHIFT 0 -+#define HMGRHANDLE_INDEX_SHIFT \ -+ (HMGRHANDLE_INSTANCE_BITS + HMGRHANDLE_INSTANCE_SHIFT) -+#define HMGRHANDLE_UNIQUE_SHIFT \ -+ (HMGRHANDLE_INDEX_BITS + HMGRHANDLE_INDEX_SHIFT) -+ -+#define HMGRHANDLE_INSTANCE_MASK \ -+ (((1 << HMGRHANDLE_INSTANCE_BITS) - 1) << HMGRHANDLE_INSTANCE_SHIFT) -+#define HMGRHANDLE_INDEX_MASK \ -+ (((1 << HMGRHANDLE_INDEX_BITS) - 1) << HMGRHANDLE_INDEX_SHIFT) -+#define HMGRHANDLE_UNIQUE_MASK \ -+ (((1 << HMGRHANDLE_UNIQUE_BITS) - 1) << HMGRHANDLE_UNIQUE_SHIFT) -+ -+#define HMGRHANDLE_INSTANCE_MAX ((1 << HMGRHANDLE_INSTANCE_BITS) - 1) -+#define HMGRHANDLE_INDEX_MAX ((1 << HMGRHANDLE_INDEX_BITS) - 1) -+#define HMGRHANDLE_UNIQUE_MAX ((1 << HMGRHANDLE_UNIQUE_BITS) - 1) -+ -+/* -+ * Handle entry -+ */ -+struct hmgrentry { -+ union { -+ void *object; -+ struct { -+ u32 prev_free_index; -+ u32 next_free_index; -+ }; -+ }; -+ u32 type:HMGRENTRY_TYPE_BITS + 1; -+ u32 unique:HMGRHANDLE_UNIQUE_BITS; -+ u32 instance:HMGRHANDLE_INSTANCE_BITS; -+ u32 destroyed:1; -+}; -+ -+#define HMGRTABLE_SIZE_INCREMENT 1024 -+#define HMGRTABLE_MIN_FREE_ENTRIES 128 
-+#define HMGRTABLE_INVALID_INDEX (~((1 << HMGRHANDLE_INDEX_BITS) - 1)) -+#define HMGRTABLE_SIZE_MAX 0xFFFFFFF -+ -+static u32 table_size_increment = HMGRTABLE_SIZE_INCREMENT; -+ -+static u32 get_unique(struct d3dkmthandle h) -+{ -+ return (h.v & HMGRHANDLE_UNIQUE_MASK) >> HMGRHANDLE_UNIQUE_SHIFT; -+} -+ -+static u32 get_index(struct d3dkmthandle h) -+{ -+ return (h.v & HMGRHANDLE_INDEX_MASK) >> HMGRHANDLE_INDEX_SHIFT; -+} -+ -+static bool is_handle_valid(struct hmgrtable *table, struct d3dkmthandle h, -+ bool ignore_destroyed, enum hmgrentry_type t) -+{ -+ u32 index = get_index(h); -+ u32 unique = get_unique(h); -+ struct hmgrentry *entry; -+ -+ if (index >= table->table_size) { -+ DXG_ERR("Invalid index %x %d", h.v, index); -+ return false; -+ } -+ -+ entry = &table->entry_table[index]; -+ if (unique != entry->unique) { -+ DXG_ERR("Invalid unique %x %d %d %d %p", -+ h.v, unique, entry->unique, index, entry->object); -+ return false; -+ } -+ -+ if (entry->destroyed && !ignore_destroyed) { -+ DXG_ERR("Invalid destroyed value"); -+ return false; -+ } -+ -+ if (entry->type == HMGRENTRY_TYPE_FREE) { -+ DXG_ERR("Entry is freed %x %d", h.v, index); -+ return false; -+ } -+ -+ if (t != HMGRENTRY_TYPE_FREE && t != entry->type) { -+ DXG_ERR("type mismatch %x %d %d", h.v, t, entry->type); -+ return false; -+ } -+ -+ return true; -+} -+ -+static struct d3dkmthandle build_handle(u32 index, u32 unique, u32 instance) -+{ -+ struct d3dkmthandle handle; -+ -+ handle.v = (index << HMGRHANDLE_INDEX_SHIFT) & HMGRHANDLE_INDEX_MASK; -+ handle.v |= (unique << HMGRHANDLE_UNIQUE_SHIFT) & -+ HMGRHANDLE_UNIQUE_MASK; -+ handle.v |= (instance << HMGRHANDLE_INSTANCE_SHIFT) & -+ HMGRHANDLE_INSTANCE_MASK; -+ -+ return handle; -+} -+ -+inline u32 hmgrtable_get_used_entry_count(struct hmgrtable *table) -+{ -+ DXGKRNL_ASSERT(table->table_size >= table->free_count); -+ return (table->table_size - table->free_count); -+} -+ -+bool hmgrtable_mark_destroyed(struct hmgrtable *table, struct d3dkmthandle 
h) -+{ -+ if (!is_handle_valid(table, h, false, HMGRENTRY_TYPE_FREE)) -+ return false; -+ -+ table->entry_table[get_index(h)].destroyed = true; -+ return true; -+} -+ -+bool hmgrtable_unmark_destroyed(struct hmgrtable *table, struct d3dkmthandle h) -+{ -+ if (!is_handle_valid(table, h, true, HMGRENTRY_TYPE_FREE)) -+ return true; -+ -+ DXGKRNL_ASSERT(table->entry_table[get_index(h)].destroyed); -+ table->entry_table[get_index(h)].destroyed = 0; -+ return true; -+} -+ -+static bool expand_table(struct hmgrtable *table, u32 NumEntries) -+{ -+ u32 new_table_size; -+ struct hmgrentry *new_entry; -+ u32 table_index; -+ u32 new_free_count; -+ u32 prev_free_index; -+ u32 tail_index = table->free_handle_list_tail; -+ -+ /* The tail should point to the last free element in the list */ -+ if (table->free_count != 0) { -+ if (tail_index >= table->table_size || -+ table->entry_table[tail_index].next_free_index != -+ HMGRTABLE_INVALID_INDEX) { -+ DXG_ERR("corruption"); -+ DXG_ERR("tail_index: %x", tail_index); -+ DXG_ERR("table size: %x", table->table_size); -+ DXG_ERR("free_count: %d", table->free_count); -+ DXG_ERR("NumEntries: %x", NumEntries); -+ return false; -+ } -+ } -+ -+ new_free_count = table_size_increment + table->free_count; -+ new_table_size = table->table_size + table_size_increment; -+ if (new_table_size < NumEntries) { -+ new_free_count += NumEntries - new_table_size; -+ new_table_size = NumEntries; -+ } -+ -+ if (new_table_size > HMGRHANDLE_INDEX_MAX) { -+ DXG_ERR("Invalid new table size"); -+ return false; -+ } -+ -+ new_entry = (struct hmgrentry *) -+ vzalloc(new_table_size * sizeof(struct hmgrentry)); -+ if (new_entry == NULL) { -+ DXG_ERR("allocation failed"); -+ return false; -+ } -+ -+ if (table->entry_table) { -+ memcpy(new_entry, table->entry_table, -+ table->table_size * sizeof(struct hmgrentry)); -+ vfree(table->entry_table); -+ } else { -+ table->free_handle_list_head = 0; -+ } -+ -+ table->entry_table = new_entry; -+ -+ /* Initialize new table 
entries and add to the free list */ -+ table_index = table->table_size; -+ -+ prev_free_index = table->free_handle_list_tail; -+ -+ while (table_index < new_table_size) { -+ struct hmgrentry *entry = &table->entry_table[table_index]; -+ -+ entry->prev_free_index = prev_free_index; -+ entry->next_free_index = table_index + 1; -+ entry->type = HMGRENTRY_TYPE_FREE; -+ entry->unique = 1; -+ entry->instance = 0; -+ prev_free_index = table_index; -+ -+ table_index++; -+ } -+ -+ table->entry_table[table_index - 1].next_free_index = -+ (u32) HMGRTABLE_INVALID_INDEX; -+ -+ if (table->free_count != 0) { -+ /* Link the current free list with the new entries */ -+ struct hmgrentry *entry; -+ -+ entry = &table->entry_table[table->free_handle_list_tail]; -+ entry->next_free_index = table->table_size; -+ } -+ table->free_handle_list_tail = new_table_size - 1; -+ if (table->free_handle_list_head == HMGRTABLE_INVALID_INDEX) -+ table->free_handle_list_head = table->table_size; -+ -+ table->table_size = new_table_size; -+ table->free_count = new_free_count; -+ -+ return true; -+} -+ -+void hmgrtable_init(struct hmgrtable *table, struct dxgprocess *process) -+{ -+ table->process = process; -+ table->entry_table = NULL; -+ table->table_size = 0; -+ table->free_handle_list_head = HMGRTABLE_INVALID_INDEX; -+ table->free_handle_list_tail = HMGRTABLE_INVALID_INDEX; -+ table->free_count = 0; -+ init_rwsem(&table->table_lock); -+} -+ -+void hmgrtable_destroy(struct hmgrtable *table) -+{ -+ if (table->entry_table) { -+ vfree(table->entry_table); -+ table->entry_table = NULL; -+ } -+} -+ -+void hmgrtable_lock(struct hmgrtable *table, enum dxglockstate state) -+{ -+ if (state == DXGLOCK_EXCL) -+ down_write(&table->table_lock); -+ else -+ down_read(&table->table_lock); -+} -+ -+void hmgrtable_unlock(struct hmgrtable *table, enum dxglockstate state) -+{ -+ if (state == DXGLOCK_EXCL) -+ up_write(&table->table_lock); -+ else -+ up_read(&table->table_lock); -+} -+ -+struct d3dkmthandle 
hmgrtable_alloc_handle(struct hmgrtable *table, -+ void *object, -+ enum hmgrentry_type type, -+ bool make_valid) -+{ -+ u32 index; -+ struct hmgrentry *entry; -+ u32 unique; -+ -+ DXGKRNL_ASSERT(type <= HMGRENTRY_TYPE_LIMIT); -+ DXGKRNL_ASSERT(type > HMGRENTRY_TYPE_FREE); -+ -+ if (table->free_count <= HMGRTABLE_MIN_FREE_ENTRIES) { -+ if (!expand_table(table, 0)) { -+ DXG_ERR("hmgrtable expand_table failed"); -+ return zerohandle; -+ } -+ } -+ -+ if (table->free_handle_list_head >= table->table_size) { -+ DXG_ERR("hmgrtable corrupted handle table head"); -+ return zerohandle; -+ } -+ -+ index = table->free_handle_list_head; -+ entry = &table->entry_table[index]; -+ -+ if (entry->type != HMGRENTRY_TYPE_FREE) { -+ DXG_ERR("hmgrtable expected free handle"); -+ return zerohandle; -+ } -+ -+ table->free_handle_list_head = entry->next_free_index; -+ -+ if (entry->next_free_index != table->free_handle_list_tail) { -+ if (entry->next_free_index >= table->table_size) { -+ DXG_ERR("hmgrtable invalid next free index"); -+ return zerohandle; -+ } -+ table->entry_table[entry->next_free_index].prev_free_index = -+ HMGRTABLE_INVALID_INDEX; -+ } -+ -+ unique = table->entry_table[index].unique; -+ -+ table->entry_table[index].object = object; -+ table->entry_table[index].type = type; -+ table->entry_table[index].instance = 0; -+ table->entry_table[index].destroyed = !make_valid; -+ table->free_count--; -+ DXGKRNL_ASSERT(table->free_count <= table->table_size); -+ -+ return build_handle(index, unique, table->entry_table[index].instance); -+} -+ -+int hmgrtable_assign_handle_safe(struct hmgrtable *table, -+ void *object, -+ enum hmgrentry_type type, -+ struct d3dkmthandle h) -+{ -+ int ret; -+ -+ hmgrtable_lock(table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(table, object, type, h); -+ hmgrtable_unlock(table, DXGLOCK_EXCL); -+ return ret; -+} -+ -+int hmgrtable_assign_handle(struct hmgrtable *table, void *object, -+ enum hmgrentry_type type, struct d3dkmthandle h) -+{ -+ u32 
index = get_index(h); -+ u32 unique = get_unique(h); -+ struct hmgrentry *entry = NULL; -+ -+ DXG_TRACE("%x, %d %p, %p", h.v, index, object, table); -+ -+ if (index >= HMGRHANDLE_INDEX_MAX) { -+ DXG_ERR("handle index is too big: %x %d", h.v, index); -+ return -EINVAL; -+ } -+ -+ if (index >= table->table_size) { -+ u32 new_size = index + table_size_increment; -+ -+ if (new_size > HMGRHANDLE_INDEX_MAX) -+ new_size = HMGRHANDLE_INDEX_MAX; -+ if (!expand_table(table, new_size)) { -+ DXG_ERR("failed to expand handle table %d", -+ new_size); -+ return -ENOMEM; -+ } -+ } -+ -+ entry = &table->entry_table[index]; -+ -+ if (entry->type != HMGRENTRY_TYPE_FREE) { -+ DXG_ERR("the entry is not free: %d %x", entry->type, -+ hmgrtable_build_entry_handle(table, index).v); -+ return -EINVAL; -+ } -+ -+ if (index != table->free_handle_list_tail) { -+ if (entry->next_free_index >= table->table_size) { -+ DXG_ERR("hmgr: invalid next free index %d", -+ entry->next_free_index); -+ return -EINVAL; -+ } -+ table->entry_table[entry->next_free_index].prev_free_index = -+ entry->prev_free_index; -+ } else { -+ table->free_handle_list_tail = entry->prev_free_index; -+ } -+ -+ if (index != table->free_handle_list_head) { -+ if (entry->prev_free_index >= table->table_size) { -+ DXG_ERR("hmgr: invalid next prev index %d", -+ entry->prev_free_index); -+ return -EINVAL; -+ } -+ table->entry_table[entry->prev_free_index].next_free_index = -+ entry->next_free_index; -+ } else { -+ table->free_handle_list_head = entry->next_free_index; -+ } -+ -+ entry->prev_free_index = HMGRTABLE_INVALID_INDEX; -+ entry->next_free_index = HMGRTABLE_INVALID_INDEX; -+ entry->object = object; -+ entry->type = type; -+ entry->instance = 0; -+ entry->unique = unique; -+ entry->destroyed = false; -+ -+ table->free_count--; -+ DXGKRNL_ASSERT(table->free_count <= table->table_size); -+ return 0; -+} -+ -+struct d3dkmthandle hmgrtable_alloc_handle_safe(struct hmgrtable *table, -+ void *obj, -+ enum hmgrentry_type type, -+ 
bool make_valid) -+{ -+ struct d3dkmthandle h; -+ -+ hmgrtable_lock(table, DXGLOCK_EXCL); -+ h = hmgrtable_alloc_handle(table, obj, type, make_valid); -+ hmgrtable_unlock(table, DXGLOCK_EXCL); -+ return h; -+} -+ -+void hmgrtable_free_handle(struct hmgrtable *table, enum hmgrentry_type t, -+ struct d3dkmthandle h) -+{ -+ struct hmgrentry *entry; -+ u32 i = get_index(h); -+ -+ DXG_TRACE("%p %x", table, h.v); -+ -+ /* Ignore the destroyed flag when checking the handle */ -+ if (is_handle_valid(table, h, true, t)) { -+ DXGKRNL_ASSERT(table->free_count < table->table_size); -+ entry = &table->entry_table[i]; -+ entry->unique = 1; -+ entry->type = HMGRENTRY_TYPE_FREE; -+ entry->destroyed = 0; -+ if (entry->unique != HMGRHANDLE_UNIQUE_MAX) -+ entry->unique += 1; -+ else -+ entry->unique = 1; -+ -+ table->free_count++; -+ DXGKRNL_ASSERT(table->free_count <= table->table_size); -+ -+ /* -+ * Insert the index to the free list at the tail. -+ */ -+ entry->next_free_index = HMGRTABLE_INVALID_INDEX; -+ entry->prev_free_index = table->free_handle_list_tail; -+ entry = &table->entry_table[table->free_handle_list_tail]; -+ entry->next_free_index = i; -+ table->free_handle_list_tail = i; -+ } else { -+ DXG_ERR("Invalid handle to free: %d %x", i, h.v); -+ } -+} -+ -+void hmgrtable_free_handle_safe(struct hmgrtable *table, enum hmgrentry_type t, -+ struct d3dkmthandle h) -+{ -+ hmgrtable_lock(table, DXGLOCK_EXCL); -+ hmgrtable_free_handle(table, t, h); -+ hmgrtable_unlock(table, DXGLOCK_EXCL); -+} -+ -+struct d3dkmthandle hmgrtable_build_entry_handle(struct hmgrtable *table, -+ u32 index) -+{ -+ DXGKRNL_ASSERT(index < table->table_size); -+ -+ return build_handle(index, table->entry_table[index].unique, -+ table->entry_table[index].instance); -+} -+ -+void *hmgrtable_get_object(struct hmgrtable *table, struct d3dkmthandle h) -+{ -+ if (!is_handle_valid(table, h, false, HMGRENTRY_TYPE_FREE)) -+ return NULL; -+ -+ return table->entry_table[get_index(h)].object; -+} -+ -+void 
*hmgrtable_get_object_by_type(struct hmgrtable *table, -+ enum hmgrentry_type type, -+ struct d3dkmthandle h) -+{ -+ if (!is_handle_valid(table, h, false, type)) { -+ DXG_ERR("Invalid handle %x", h.v); -+ return NULL; -+ } -+ return table->entry_table[get_index(h)].object; -+} -+ -+void *hmgrtable_get_entry_object(struct hmgrtable *table, u32 index) -+{ -+ DXGKRNL_ASSERT(index < table->table_size); -+ DXGKRNL_ASSERT(table->entry_table[index].type != HMGRENTRY_TYPE_FREE); -+ -+ return table->entry_table[index].object; -+} -+ -+static enum hmgrentry_type hmgrtable_get_entry_type(struct hmgrtable *table, -+ u32 index) -+{ -+ DXGKRNL_ASSERT(index < table->table_size); -+ return (enum hmgrentry_type)table->entry_table[index].type; -+} -+ -+enum hmgrentry_type hmgrtable_get_object_type(struct hmgrtable *table, -+ struct d3dkmthandle h) -+{ -+ if (!is_handle_valid(table, h, false, HMGRENTRY_TYPE_FREE)) -+ return HMGRENTRY_TYPE_FREE; -+ -+ return hmgrtable_get_entry_type(table, get_index(h)); -+} -+ -+void *hmgrtable_get_object_ignore_destroyed(struct hmgrtable *table, -+ struct d3dkmthandle h, -+ enum hmgrentry_type type) -+{ -+ if (!is_handle_valid(table, h, true, type)) -+ return NULL; -+ return table->entry_table[get_index(h)].object; -+} -+ -+bool hmgrtable_next_entry(struct hmgrtable *tbl, -+ u32 *index, -+ enum hmgrentry_type *type, -+ struct d3dkmthandle *handle, -+ void **object) -+{ -+ u32 i; -+ struct hmgrentry *entry; -+ -+ for (i = *index; i < tbl->table_size; i++) { -+ entry = &tbl->entry_table[i]; -+ if (entry->type != HMGRENTRY_TYPE_FREE) { -+ *index = i + 1; -+ *object = entry->object; -+ *handle = build_handle(i, entry->unique, -+ entry->instance); -+ *type = entry->type; -+ return true; -+ } -+ } -+ return false; -+} -diff --git a/drivers/hv/dxgkrnl/hmgr.h b/drivers/hv/dxgkrnl/hmgr.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/hmgr.h -@@ -0,0 +1,112 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+/* 
-+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Handle manager definitions -+ * -+ */ -+ -+#ifndef _HMGR_H_ -+#define _HMGR_H_ -+ -+#include "misc.h" -+ -+struct hmgrentry; -+ -+/* -+ * Handle manager table. -+ * -+ * Implementation notes: -+ * A list of free handles is built on top of the array of table entries. -+ * free_handle_list_head is the index of the first entry in the list. -+ * m_FreeHandleListTail is the index of an entry in the list, which is -+ * HMGRTABLE_MIN_FREE_ENTRIES from the head. It means that when a handle is -+ * freed, the next time the handle can be re-used is after allocating -+ * HMGRTABLE_MIN_FREE_ENTRIES number of handles. -+ * Handles are allocated from the start of the list and free handles are -+ * inserted after the tail of the list. -+ * -+ */ -+struct hmgrtable { -+ struct dxgprocess *process; -+ struct hmgrentry *entry_table; -+ u32 free_handle_list_head; -+ u32 free_handle_list_tail; -+ u32 table_size; -+ u32 free_count; -+ struct rw_semaphore table_lock; -+}; -+ -+/* -+ * Handle entry data types. 
-+ */ -+#define HMGRENTRY_TYPE_BITS 5 -+ -+enum hmgrentry_type { -+ HMGRENTRY_TYPE_FREE = 0, -+ HMGRENTRY_TYPE_DXGADAPTER = 1, -+ HMGRENTRY_TYPE_DXGSHAREDRESOURCE = 2, -+ HMGRENTRY_TYPE_DXGDEVICE = 3, -+ HMGRENTRY_TYPE_DXGRESOURCE = 4, -+ HMGRENTRY_TYPE_DXGALLOCATION = 5, -+ HMGRENTRY_TYPE_DXGOVERLAY = 6, -+ HMGRENTRY_TYPE_DXGCONTEXT = 7, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT = 8, -+ HMGRENTRY_TYPE_DXGKEYEDMUTEX = 9, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE = 10, -+ HMGRENTRY_TYPE_DXGDEVICESYNCOBJECT = 11, -+ HMGRENTRY_TYPE_DXGPROCESS = 12, -+ HMGRENTRY_TYPE_DXGSHAREDVMOBJECT = 13, -+ HMGRENTRY_TYPE_DXGPROTECTEDSESSION = 14, -+ HMGRENTRY_TYPE_DXGHWQUEUE = 15, -+ HMGRENTRY_TYPE_DXGREMOTEBUNDLEOBJECT = 16, -+ HMGRENTRY_TYPE_DXGCOMPOSITIONSURFACEOBJECT = 17, -+ HMGRENTRY_TYPE_DXGCOMPOSITIONSURFACEPROXY = 18, -+ HMGRENTRY_TYPE_DXGTRACKEDWORKLOAD = 19, -+ HMGRENTRY_TYPE_LIMIT = ((1 << HMGRENTRY_TYPE_BITS) - 1), -+ HMGRENTRY_TYPE_MONITOREDFENCE = HMGRENTRY_TYPE_LIMIT + 1, -+}; -+ -+void hmgrtable_init(struct hmgrtable *tbl, struct dxgprocess *process); -+void hmgrtable_destroy(struct hmgrtable *tbl); -+void hmgrtable_lock(struct hmgrtable *tbl, enum dxglockstate state); -+void hmgrtable_unlock(struct hmgrtable *tbl, enum dxglockstate state); -+struct d3dkmthandle hmgrtable_alloc_handle(struct hmgrtable *tbl, void *object, -+ enum hmgrentry_type t, bool make_valid); -+struct d3dkmthandle hmgrtable_alloc_handle_safe(struct hmgrtable *tbl, -+ void *obj, -+ enum hmgrentry_type t, -+ bool reserve); -+int hmgrtable_assign_handle(struct hmgrtable *tbl, void *obj, -+ enum hmgrentry_type, struct d3dkmthandle h); -+int hmgrtable_assign_handle_safe(struct hmgrtable *tbl, void *obj, -+ enum hmgrentry_type t, struct d3dkmthandle h); -+void hmgrtable_free_handle(struct hmgrtable *tbl, enum hmgrentry_type t, -+ struct d3dkmthandle h); -+void hmgrtable_free_handle_safe(struct hmgrtable *tbl, enum hmgrentry_type t, -+ struct d3dkmthandle h); -+struct d3dkmthandle hmgrtable_build_entry_handle(struct 
hmgrtable *tbl, -+ u32 index); -+enum hmgrentry_type hmgrtable_get_object_type(struct hmgrtable *tbl, -+ struct d3dkmthandle h); -+void *hmgrtable_get_object(struct hmgrtable *tbl, struct d3dkmthandle h); -+void *hmgrtable_get_object_by_type(struct hmgrtable *tbl, enum hmgrentry_type t, -+ struct d3dkmthandle h); -+void *hmgrtable_get_object_ignore_destroyed(struct hmgrtable *tbl, -+ struct d3dkmthandle h, -+ enum hmgrentry_type t); -+bool hmgrtable_mark_destroyed(struct hmgrtable *tbl, struct d3dkmthandle h); -+bool hmgrtable_unmark_destroyed(struct hmgrtable *tbl, struct d3dkmthandle h); -+void *hmgrtable_get_entry_object(struct hmgrtable *tbl, u32 index); -+bool hmgrtable_next_entry(struct hmgrtable *tbl, -+ u32 *start_index, -+ enum hmgrentry_type *type, -+ struct d3dkmthandle *handle, -+ void **object); -+ -+#endif -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -22,3 +22,63 @@ - - #undef pr_fmt - #define pr_fmt(fmt) "dxgk: " fmt -+ -+struct ioctl_desc { -+ int (*ioctl_callback)(struct dxgprocess *p, void __user *arg); -+ u32 ioctl; -+ u32 arg_size; -+}; -+ -+static struct ioctl_desc ioctls[] = { -+ -+}; -+ -+/* -+ * IOCTL processing -+ * The driver IOCTLs return -+ * - 0 in case of success -+ * - positive values, which are Windows NTSTATUS (for example, STATUS_PENDING). -+ * Positive values are success codes. 
-+ * - Linux negative error codes -+ */ -+static int dxgk_ioctl(struct file *f, unsigned int p1, unsigned long p2) -+{ -+ int code = _IOC_NR(p1); -+ int status; -+ struct dxgprocess *process; -+ -+ if (code < 1 || code >= ARRAY_SIZE(ioctls)) { -+ DXG_ERR("bad ioctl %x %x %x %x", -+ code, _IOC_TYPE(p1), _IOC_SIZE(p1), _IOC_DIR(p1)); -+ return -ENOTTY; -+ } -+ if (ioctls[code].ioctl_callback == NULL) { -+ DXG_ERR("ioctl callback is NULL %x", code); -+ return -ENOTTY; -+ } -+ if (ioctls[code].ioctl != p1) { -+ DXG_ERR("ioctl mismatch. Code: %x User: %x Kernel: %x", -+ code, p1, ioctls[code].ioctl); -+ return -ENOTTY; -+ } -+ process = (struct dxgprocess *)f->private_data; -+ if (process->tgid != current->tgid) { -+ DXG_ERR("Call from a wrong process: %d %d", -+ process->tgid, current->tgid); -+ return -ENOTTY; -+ } -+ status = ioctls[code].ioctl_callback(process, (void *__user)p2); -+ return status; -+} -+ -+long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2) -+{ -+ DXG_TRACE("compat ioctl %x", p1); -+ return dxgk_ioctl(f, p1, p2); -+} -+ -+long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2) -+{ -+ DXG_TRACE("unlocked ioctl %x Code:%d", p1, _IOC_NR(p1)); -+ return dxgk_ioctl(f, p1, p2); -+} -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -27,10 +27,11 @@ extern const struct d3dkmthandle zerohandle; - * - * channel_lock (VMBus channel lock) - * fd_mutex -- * plistmutex (process list mutex) -- * table_lock (handle table lock) -- * core_lock (dxgadapter lock) -- * device_lock (dxgdevice lock) -+ * plistmutex -+ * table_lock -+ * core_lock -+ * device_lock -+ * process_adapter_mutex - * adapter_list_lock - * device_mutex (dxgglobal mutex) - */ -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ 
b/include/uapi/misc/d3dkmthk.h -@@ -58,4 +58,107 @@ struct winluid { - __u32 b; - }; - -+#define D3DKMT_ADAPTERS_MAX 64 -+ -+struct d3dkmt_adapterinfo { -+ struct d3dkmthandle adapter_handle; -+ struct winluid adapter_luid; -+ __u32 num_sources; -+ __u32 present_move_regions_preferred; -+}; -+ -+struct d3dkmt_enumadapters2 { -+ __u32 num_adapters; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ struct d3dkmt_adapterinfo *adapters; -+#else -+ __u64 *adapters; -+#endif -+}; -+ -+struct d3dkmt_closeadapter { -+ struct d3dkmthandle adapter_handle; -+}; -+ -+struct d3dkmt_openadapterfromluid { -+ struct winluid adapter_luid; -+ struct d3dkmthandle adapter_handle; -+}; -+ -+struct d3dkmt_adaptertype { -+ union { -+ struct { -+ __u32 render_supported:1; -+ __u32 display_supported:1; -+ __u32 software_device:1; -+ __u32 post_device:1; -+ __u32 hybrid_discrete:1; -+ __u32 hybrid_integrated:1; -+ __u32 indirect_display_device:1; -+ __u32 paravirtualized:1; -+ __u32 acg_supported:1; -+ __u32 support_set_timings_from_vidpn:1; -+ __u32 detachable:1; -+ __u32 compute_only:1; -+ __u32 prototype:1; -+ __u32 reserved:19; -+ }; -+ __u32 value; -+ }; -+}; -+ -+enum kmtqueryadapterinfotype { -+ _KMTQAITYPE_UMDRIVERPRIVATE = 0, -+ _KMTQAITYPE_ADAPTERTYPE = 15, -+ _KMTQAITYPE_ADAPTERTYPE_RENDER = 57 -+}; -+ -+struct d3dkmt_queryadapterinfo { -+ struct d3dkmthandle adapter; -+ enum kmtqueryadapterinfotype type; -+#ifdef __KERNEL__ -+ void *private_data; -+#else -+ __u64 private_data; -+#endif -+ __u32 private_data_size; -+}; -+ -+union d3dkmt_enumadapters_filter { -+ struct { -+ __u64 include_compute_only:1; -+ __u64 include_display_only:1; -+ __u64 reserved:62; -+ }; -+ __u64 value; -+}; -+ -+struct d3dkmt_enumadapters3 { -+ union d3dkmt_enumadapters_filter filter; -+ __u32 adapter_count; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ struct d3dkmt_adapterinfo *adapters; -+#else -+ __u64 adapters; -+#endif -+}; -+ -+/* -+ * Dxgkrnl Graphics Port Driver ioctl definitions -+ * -+ */ -+ -+#define 
LX_DXOPENADAPTERFROMLUID \ -+ _IOWR(0x47, 0x01, struct d3dkmt_openadapterfromluid) -+#define LX_DXQUERYADAPTERINFO \ -+ _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) -+#define LX_DXENUMADAPTERS2 \ -+ _IOWR(0x47, 0x14, struct d3dkmt_enumadapters2) -+#define LX_DXCLOSEADAPTER \ -+ _IOWR(0x47, 0x15, struct d3dkmt_closeadapter) -+#define LX_DXENUMADAPTERS3 \ -+ _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) -+ - #endif /* _D3DKMTHK_H */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1672-drivers-hv-dxgkrnl-Enumerate-and-open-dxgadapter-objects.patch b/patch/kernel/archive/wsl2-arm64-6.6/1672-drivers-hv-dxgkrnl-Enumerate-and-open-dxgadapter-objects.patch deleted file mode 100644 index 78e761b42b20..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1672-drivers-hv-dxgkrnl-Enumerate-and-open-dxgadapter-objects.patch +++ /dev/null @@ -1,554 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Mon, 21 Mar 2022 19:18:50 -0700 -Subject: drivers: hv: dxgkrnl: Enumerate and open dxgadapter objects - -Implement ioctls to enumerate dxgadapter objects: - - The LX_DXENUMADAPTERS2 ioctl - - The LX_DXENUMADAPTERS3 ioctl. - -Implement ioctls to open adapter by LUID and to close adapter -handle: - - The LX_DXOPENADAPTERFROMLUID ioctl - - the LX_DXCLOSEADAPTER ioctl - -Impllement the ioctl to query dxgadapter information: - - The LX_DXQUERYADAPTERINFO ioctl - -When a dxgadapter is enumerated, it is implicitely opened and -a handle (d3dkmthandle) is created in the current process handle -table. The handle is returned to the caller and can be used -by user mode to reference the VGPU adapter in other ioctls. - -The caller is responsible to close the adapter when it is not -longer used by sending the LX_DXCLOSEADAPTER ioctl. - -A dxgprocess has a list of opened dxgadapter objects -(dxgprocess_adapter is used to represent the entry in the list). -A dxgadapter also has a list of dxgprocess_adapter objects. 
-This is needed for cleanup because either a process or an adapter -could be destroyed first. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgmodule.c | 3 + - drivers/hv/dxgkrnl/ioctl.c | 482 +++++++++- - 2 files changed, 484 insertions(+), 1 deletion(-) - -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -721,6 +721,9 @@ static struct dxgglobal *dxgglobal_create(void) - - init_rwsem(&dxgglobal->channel_lock); - -+#ifdef DEBUG -+ dxgk_validate_ioctls(); -+#endif - return dxgglobal; - } - -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -29,8 +29,472 @@ struct ioctl_desc { - u32 arg_size; - }; - --static struct ioctl_desc ioctls[] = { -+#ifdef DEBUG -+static char *errorstr(int ret) -+{ -+ return ret < 0 ? 
"err" : ""; -+} -+#endif -+ -+static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_openadapterfromluid args; -+ int ret; -+ struct dxgadapter *entry; -+ struct dxgadapter *adapter = NULL; -+ struct d3dkmt_openadapterfromluid *__user result = inargs; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("Faled to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED); -+ dxgglobal_acquire_process_adapter_lock(); -+ -+ list_for_each_entry(entry, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (dxgadapter_acquire_lock_shared(entry) == 0) { -+ if (*(u64 *) &entry->luid == -+ *(u64 *) &args.adapter_luid) { -+ ret = dxgprocess_open_adapter(process, entry, -+ &args.adapter_handle); -+ -+ if (ret >= 0) { -+ ret = copy_to_user( -+ &result->adapter_handle, -+ &args.adapter_handle, -+ sizeof(struct d3dkmthandle)); -+ if (ret) -+ ret = -EINVAL; -+ } -+ adapter = entry; -+ } -+ dxgadapter_release_lock_shared(entry); -+ if (adapter) -+ break; -+ } -+ } -+ -+ dxgglobal_release_process_adapter_lock(); -+ dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED); -+ -+ if (args.adapter_handle.v == 0) -+ ret = -EINVAL; -+ -+cleanup: -+ -+ if (ret < 0) -+ dxgprocess_close_adapter(process, args.adapter_handle); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkp_enum_adapters(struct dxgprocess *process, -+ union d3dkmt_enumadapters_filter filter, -+ u32 adapter_count_max, -+ struct d3dkmt_adapterinfo *__user info_out, -+ u32 * __user adapter_count_out) -+{ -+ int ret = 0; -+ struct dxgadapter *entry; -+ struct d3dkmt_adapterinfo *info = NULL; -+ struct dxgadapter **adapters = NULL; -+ int adapter_count = 0; -+ int i; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (info_out == NULL || adapter_count_max == 0) { -+ ret = 
copy_to_user(adapter_count_out, -+ &dxgglobal->num_adapters, sizeof(u32)); -+ if (ret) { -+ DXG_ERR("copy_to_user faled"); -+ ret = -EINVAL; -+ } -+ goto cleanup; -+ } -+ -+ if (adapter_count_max > 0xFFFF) { -+ DXG_ERR("too many adapters"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ info = vzalloc(sizeof(struct d3dkmt_adapterinfo) * adapter_count_max); -+ if (info == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ adapters = vzalloc(sizeof(struct dxgadapter *) * adapter_count_max); -+ if (adapters == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED); -+ dxgglobal_acquire_process_adapter_lock(); - -+ list_for_each_entry(entry, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (dxgadapter_acquire_lock_shared(entry) == 0) { -+ struct d3dkmt_adapterinfo *inf = &info[adapter_count]; -+ -+ ret = dxgprocess_open_adapter(process, entry, -+ &inf->adapter_handle); -+ if (ret >= 0) { -+ inf->adapter_luid = entry->luid; -+ adapters[adapter_count] = entry; -+ DXG_TRACE("adapter: %x %x:%x", -+ inf->adapter_handle.v, -+ inf->adapter_luid.b, -+ inf->adapter_luid.a); -+ adapter_count++; -+ } -+ dxgadapter_release_lock_shared(entry); -+ } -+ if (ret < 0) -+ break; -+ } -+ -+ dxgglobal_release_process_adapter_lock(); -+ dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED); -+ -+ if (adapter_count > adapter_count_max) { -+ ret = STATUS_BUFFER_TOO_SMALL; -+ DXG_TRACE("Too many adapters"); -+ ret = copy_to_user(adapter_count_out, -+ &dxgglobal->num_adapters, sizeof(u32)); -+ if (ret) { -+ DXG_ERR("copy_to_user failed"); -+ ret = -EINVAL; -+ } -+ goto cleanup; -+ } -+ -+ ret = copy_to_user(adapter_count_out, &adapter_count, -+ sizeof(adapter_count)); -+ if (ret) { -+ DXG_ERR("failed to copy adapter_count"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_to_user(info_out, info, sizeof(info[0]) * adapter_count); -+ if (ret) { -+ DXG_ERR("failed to copy adapter info"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ 
-+ if (ret >= 0) { -+ DXG_TRACE("found %d adapters", adapter_count); -+ goto success; -+ } -+ if (info) { -+ for (i = 0; i < adapter_count; i++) -+ dxgprocess_close_adapter(process, -+ info[i].adapter_handle); -+ } -+success: -+ if (info) -+ vfree(info); -+ if (adapters) -+ vfree(adapters); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_enum_adapters(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_enumadapters2 args; -+ int ret; -+ struct dxgadapter *entry; -+ struct d3dkmt_adapterinfo *info = NULL; -+ struct dxgadapter **adapters = NULL; -+ int adapter_count = 0; -+ int i; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.adapters == NULL) { -+ DXG_TRACE("buffer is NULL"); -+ args.num_adapters = dxgglobal->num_adapters; -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy args to user"); -+ ret = -EINVAL; -+ } -+ goto cleanup; -+ } -+ if (args.num_adapters < dxgglobal->num_adapters) { -+ args.num_adapters = dxgglobal->num_adapters; -+ DXG_TRACE("buffer is too small"); -+ ret = -EOVERFLOW; -+ goto cleanup; -+ } -+ -+ if (args.num_adapters > D3DKMT_ADAPTERS_MAX) { -+ DXG_TRACE("too many adapters"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ info = vzalloc(sizeof(struct d3dkmt_adapterinfo) * args.num_adapters); -+ if (info == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ adapters = vzalloc(sizeof(struct dxgadapter *) * args.num_adapters); -+ if (adapters == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED); -+ dxgglobal_acquire_process_adapter_lock(); -+ -+ list_for_each_entry(entry, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (dxgadapter_acquire_lock_shared(entry) == 0) { -+ struct d3dkmt_adapterinfo *inf = 
&info[adapter_count]; -+ -+ ret = dxgprocess_open_adapter(process, entry, -+ &inf->adapter_handle); -+ if (ret >= 0) { -+ inf->adapter_luid = entry->luid; -+ adapters[adapter_count] = entry; -+ DXG_TRACE("adapter: %x %llx", -+ inf->adapter_handle.v, -+ *(u64 *) &inf->adapter_luid); -+ adapter_count++; -+ } -+ dxgadapter_release_lock_shared(entry); -+ } -+ if (ret < 0) -+ break; -+ } -+ -+ dxgglobal_release_process_adapter_lock(); -+ dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED); -+ -+ args.num_adapters = adapter_count; -+ -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy args to user"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_to_user(args.adapters, info, -+ sizeof(info[0]) * args.num_adapters); -+ if (ret) { -+ DXG_ERR("failed to copy adapter info to user"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (info) { -+ for (i = 0; i < args.num_adapters; i++) { -+ dxgprocess_close_adapter(process, -+ info[i].adapter_handle); -+ } -+ } -+ } else { -+ DXG_TRACE("found %d adapters", args.num_adapters); -+ } -+ -+ if (info) -+ vfree(info); -+ if (adapters) -+ vfree(adapters); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_enum_adapters3(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_enumadapters3 args; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgkp_enum_adapters(process, args.filter, -+ args.adapter_count, -+ args.adapters, -+ &((struct d3dkmt_enumadapters3 *)inargs)-> -+ adapter_count); -+ -+cleanup: -+ -+ DXG_TRACE("ioctl: %s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_close_adapter(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmthandle args; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed 
to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgprocess_close_adapter(process, args); -+ if (ret < 0) -+ DXG_ERR("failed to close adapter: %d", ret); -+ -+cleanup: -+ -+ DXG_TRACE("ioctl: %s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_query_adapter_info(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_queryadapterinfo args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.private_data_size > DXG_MAX_VM_BUS_PACKET_SIZE || -+ args.private_data_size == 0) { -+ DXG_ERR("invalid private data size"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ DXG_TRACE("Type: %d Size: %x", args.type, args.private_data_size); -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = dxgvmb_send_query_adapter_info(process, adapter, &args); -+ -+ dxgadapter_release_lock_shared(adapter); -+ -+cleanup: -+ -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static struct ioctl_desc ioctls[] = { -+/* 0x00 */ {}, -+/* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, -+/* 0x02 */ {}, -+/* 0x03 */ {}, -+/* 0x04 */ {}, -+/* 0x05 */ {}, -+/* 0x06 */ {}, -+/* 0x07 */ {}, -+/* 0x08 */ {}, -+/* 0x09 */ {dxgkio_query_adapter_info, LX_DXQUERYADAPTERINFO}, -+/* 0x0a */ {}, -+/* 0x0b */ {}, -+/* 0x0c */ {}, -+/* 0x0d */ {}, -+/* 0x0e */ {}, -+/* 0x0f */ {}, -+/* 0x10 */ {}, -+/* 0x11 */ {}, -+/* 0x12 */ {}, -+/* 0x13 */ {}, -+/* 0x14 */ {dxgkio_enum_adapters, LX_DXENUMADAPTERS2}, -+/* 0x15 */ {dxgkio_close_adapter, LX_DXCLOSEADAPTER}, -+/* 0x16 */ {}, -+/* 0x17 */ {}, -+/* 0x18 
*/ {}, -+/* 0x19 */ {}, -+/* 0x1a */ {}, -+/* 0x1b */ {}, -+/* 0x1c */ {}, -+/* 0x1d */ {}, -+/* 0x1e */ {}, -+/* 0x1f */ {}, -+/* 0x20 */ {}, -+/* 0x21 */ {}, -+/* 0x22 */ {}, -+/* 0x23 */ {}, -+/* 0x24 */ {}, -+/* 0x25 */ {}, -+/* 0x26 */ {}, -+/* 0x27 */ {}, -+/* 0x28 */ {}, -+/* 0x29 */ {}, -+/* 0x2a */ {}, -+/* 0x2b */ {}, -+/* 0x2c */ {}, -+/* 0x2d */ {}, -+/* 0x2e */ {}, -+/* 0x2f */ {}, -+/* 0x30 */ {}, -+/* 0x31 */ {}, -+/* 0x32 */ {}, -+/* 0x33 */ {}, -+/* 0x34 */ {}, -+/* 0x35 */ {}, -+/* 0x36 */ {}, -+/* 0x37 */ {}, -+/* 0x38 */ {}, -+/* 0x39 */ {}, -+/* 0x3a */ {}, -+/* 0x3b */ {}, -+/* 0x3c */ {}, -+/* 0x3d */ {}, -+/* 0x3e */ {dxgkio_enum_adapters3, LX_DXENUMADAPTERS3}, -+/* 0x3f */ {}, -+/* 0x40 */ {}, -+/* 0x41 */ {}, -+/* 0x42 */ {}, -+/* 0x43 */ {}, -+/* 0x44 */ {}, -+/* 0x45 */ {}, - }; - - /* -@@ -82,3 +546,19 @@ long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2) - DXG_TRACE("unlocked ioctl %x Code:%d", p1, _IOC_NR(p1)); - return dxgk_ioctl(f, p1, p2); - } -+ -+#ifdef DEBUG -+void dxgk_validate_ioctls(void) -+{ -+ int i; -+ -+ for (i=0; i < ARRAY_SIZE(ioctls); i++) -+ { -+ if (ioctls[i].ioctl && _IOC_NR(ioctls[i].ioctl) != i) -+ { -+ DXG_ERR("Invalid ioctl"); -+ DXGKRNL_ASSERT(0); -+ } -+ } -+} -+#endif --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1673-drivers-hv-dxgkrnl-Creation-of-dxgdevice-objects.patch b/patch/kernel/archive/wsl2-arm64-6.6/1673-drivers-hv-dxgkrnl-Creation-of-dxgdevice-objects.patch deleted file mode 100644 index 8bbd854777b4..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1673-drivers-hv-dxgkrnl-Creation-of-dxgdevice-objects.patch +++ /dev/null @@ -1,828 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 1 Feb 2022 17:23:58 -0800 -Subject: drivers: hv: dxgkrnl: Creation of dxgdevice objects - -Implement ioctls for creation and destruction of dxgdevice -objects: - - the LX_DXCREATEDEVICE ioctl - - the 
LX_DXDESTROYDEVICE ioctl - -A dxgdevice object represents a container of other virtual -compute device objects (allocations, sync objects, contexts, -etc.). It belongs to a dxgadapter object. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 187 ++++++++++ - drivers/hv/dxgkrnl/dxgkrnl.h | 58 +++ - drivers/hv/dxgkrnl/dxgprocess.c | 43 +++ - drivers/hv/dxgkrnl/dxgvmbus.c | 80 ++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 22 ++ - drivers/hv/dxgkrnl/ioctl.c | 130 ++++++- - drivers/hv/dxgkrnl/misc.h | 8 +- - include/uapi/misc/d3dkmthk.h | 82 ++++ - 8 files changed, 604 insertions(+), 6 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -194,6 +194,122 @@ void dxgadapter_release_lock_shared(struct dxgadapter *adapter) - up_read(&adapter->core_lock); - } - -+struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, -+ struct dxgprocess *process) -+{ -+ struct dxgdevice *device; -+ int ret; -+ -+ device = kzalloc(sizeof(struct dxgdevice), GFP_KERNEL); -+ if (device) { -+ kref_init(&device->device_kref); -+ device->adapter = adapter; -+ device->process = process; -+ kref_get(&adapter->adapter_kref); -+ init_rwsem(&device->device_lock); -+ INIT_LIST_HEAD(&device->pqueue_list_head); -+ device->object_state = DXGOBJECTSTATE_CREATED; -+ device->execution_state = _D3DKMT_DEVICEEXECUTION_ACTIVE; -+ -+ ret = dxgprocess_adapter_add_device(process, adapter, device); -+ if (ret < 0) { -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ } -+ } -+ return device; -+} -+ -+void dxgdevice_stop(struct dxgdevice *device) -+{ -+} -+ -+void dxgdevice_mark_destroyed(struct dxgdevice *device) -+{ -+ down_write(&device->device_lock); -+ device->object_state = DXGOBJECTSTATE_DESTROYED; -+ up_write(&device->device_lock); -+} -+ 
-+void dxgdevice_destroy(struct dxgdevice *device) -+{ -+ struct dxgprocess *process = device->process; -+ struct dxgadapter *adapter = device->adapter; -+ struct d3dkmthandle device_handle = {}; -+ -+ DXG_TRACE("Destroying device: %p", device); -+ -+ down_write(&device->device_lock); -+ -+ if (device->object_state != DXGOBJECTSTATE_ACTIVE) -+ goto cleanup; -+ -+ device->object_state = DXGOBJECTSTATE_DESTROYED; -+ -+ dxgdevice_stop(device); -+ -+ /* Guest handles need to be released before the host handles */ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ if (device->handle_valid) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGDEVICE, device->handle); -+ device_handle = device->handle; -+ device->handle_valid = 0; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (device_handle.v) { -+ up_write(&device->device_lock); -+ if (dxgadapter_acquire_lock_shared(adapter) == 0) { -+ dxgvmb_send_destroy_device(adapter, process, -+ device_handle); -+ dxgadapter_release_lock_shared(adapter); -+ } -+ down_write(&device->device_lock); -+ } -+ -+cleanup: -+ -+ if (device->adapter) { -+ dxgprocess_adapter_remove_device(device); -+ kref_put(&device->adapter->adapter_kref, dxgadapter_release); -+ device->adapter = NULL; -+ } -+ -+ up_write(&device->device_lock); -+ -+ kref_put(&device->device_kref, dxgdevice_release); -+ DXG_TRACE("Device destroyed"); -+} -+ -+int dxgdevice_acquire_lock_shared(struct dxgdevice *device) -+{ -+ down_read(&device->device_lock); -+ if (!dxgdevice_is_active(device)) { -+ up_read(&device->device_lock); -+ return -ENODEV; -+ } -+ return 0; -+} -+ -+void dxgdevice_release_lock_shared(struct dxgdevice *device) -+{ -+ up_read(&device->device_lock); -+} -+ -+bool dxgdevice_is_active(struct dxgdevice *device) -+{ -+ return device->object_state == DXGOBJECTSTATE_ACTIVE; -+} -+ -+void dxgdevice_release(struct kref *refcount) -+{ -+ struct dxgdevice *device; -+ -+ device = container_of(refcount, struct 
dxgdevice, device_kref); -+ kfree(device); -+} -+ - struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - struct dxgadapter *adapter) - { -@@ -208,6 +324,8 @@ struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - adapter_info->adapter = adapter; - adapter_info->process = process; - adapter_info->refcount = 1; -+ mutex_init(&adapter_info->device_list_mutex); -+ INIT_LIST_HEAD(&adapter_info->device_list_head); - list_add_tail(&adapter_info->process_adapter_list_entry, - &process->process_adapter_list_head); - dxgadapter_add_process(adapter, adapter_info); -@@ -221,10 +339,34 @@ struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - - void dxgprocess_adapter_stop(struct dxgprocess_adapter *adapter_info) - { -+ struct dxgdevice *device; -+ -+ mutex_lock(&adapter_info->device_list_mutex); -+ list_for_each_entry(device, &adapter_info->device_list_head, -+ device_list_entry) { -+ dxgdevice_stop(device); -+ } -+ mutex_unlock(&adapter_info->device_list_mutex); - } - - void dxgprocess_adapter_destroy(struct dxgprocess_adapter *adapter_info) - { -+ struct dxgdevice *device; -+ -+ mutex_lock(&adapter_info->device_list_mutex); -+ while (!list_empty(&adapter_info->device_list_head)) { -+ device = list_first_entry(&adapter_info->device_list_head, -+ struct dxgdevice, device_list_entry); -+ list_del(&device->device_list_entry); -+ device->device_list_entry.next = NULL; -+ mutex_unlock(&adapter_info->device_list_mutex); -+ dxgvmb_send_flush_device(device, -+ DXGDEVICE_FLUSHSCHEDULER_DEVICE_TERMINATE); -+ dxgdevice_destroy(device); -+ mutex_lock(&adapter_info->device_list_mutex); -+ } -+ mutex_unlock(&adapter_info->device_list_mutex); -+ - dxgadapter_remove_process(adapter_info); - kref_put(&adapter_info->adapter->adapter_kref, dxgadapter_release); - list_del(&adapter_info->process_adapter_list_entry); -@@ -240,3 +382,48 @@ void dxgprocess_adapter_release(struct dxgprocess_adapter *adapter_info) - 
if (adapter_info->refcount == 0) - dxgprocess_adapter_destroy(adapter_info); - } -+ -+int dxgprocess_adapter_add_device(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct dxgdevice *device) -+{ -+ struct dxgprocess_adapter *entry; -+ struct dxgprocess_adapter *adapter_info = NULL; -+ int ret = 0; -+ -+ dxgglobal_acquire_process_adapter_lock(); -+ -+ list_for_each_entry(entry, &process->process_adapter_list_head, -+ process_adapter_list_entry) { -+ if (entry->adapter == adapter) { -+ adapter_info = entry; -+ break; -+ } -+ } -+ if (adapter_info == NULL) { -+ DXG_ERR("failed to find process adapter info"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ mutex_lock(&adapter_info->device_list_mutex); -+ list_add_tail(&device->device_list_entry, -+ &adapter_info->device_list_head); -+ device->adapter_info = adapter_info; -+ mutex_unlock(&adapter_info->device_list_mutex); -+ -+cleanup: -+ -+ dxgglobal_release_process_adapter_lock(); -+ return ret; -+} -+ -+void dxgprocess_adapter_remove_device(struct dxgdevice *device) -+{ -+ DXG_TRACE("Removing device: %p", device); -+ mutex_lock(&device->adapter_info->device_list_mutex); -+ if (device->device_list_entry.next) { -+ list_del(&device->device_list_entry); -+ device->device_list_entry.next = NULL; -+ } -+ mutex_unlock(&device->adapter_info->device_list_mutex); -+} -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -34,6 +34,7 @@ - - struct dxgprocess; - struct dxgadapter; -+struct dxgdevice; - - /* - * Driver private data. 
-@@ -71,6 +72,10 @@ struct dxgk_device_types { - u32 virtual_monitor_device:1; - }; - -+enum dxgdevice_flushschedulerreason { -+ DXGDEVICE_FLUSHSCHEDULER_DEVICE_TERMINATE = 4, -+}; -+ - enum dxgobjectstate { - DXGOBJECTSTATE_CREATED, - DXGOBJECTSTATE_ACTIVE, -@@ -166,6 +171,9 @@ struct dxgprocess_adapter { - struct list_head adapter_process_list_entry; - /* Entry in dxgprocess::process_adapter_list_head */ - struct list_head process_adapter_list_entry; -+ /* List of all dxgdevice objects created for the process on adapter */ -+ struct list_head device_list_head; -+ struct mutex device_list_mutex; - struct dxgadapter *adapter; - struct dxgprocess *process; - int refcount; -@@ -175,6 +183,10 @@ struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - struct dxgadapter - *adapter); - void dxgprocess_adapter_release(struct dxgprocess_adapter *adapter); -+int dxgprocess_adapter_add_device(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct dxgdevice *device); -+void dxgprocess_adapter_remove_device(struct dxgdevice *device); - void dxgprocess_adapter_stop(struct dxgprocess_adapter *adapter_info); - void dxgprocess_adapter_destroy(struct dxgprocess_adapter *adapter_info); - -@@ -222,6 +234,11 @@ struct dxgadapter *dxgprocess_get_adapter(struct dxgprocess *process, - struct d3dkmthandle handle); - struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process, - struct d3dkmthandle handle); -+struct dxgdevice *dxgprocess_device_by_handle(struct dxgprocess *process, -+ struct d3dkmthandle handle); -+struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process, -+ enum hmgrentry_type t, -+ struct d3dkmthandle h); - void dxgprocess_ht_lock_shared_down(struct dxgprocess *process); - void dxgprocess_ht_lock_shared_up(struct dxgprocess *process); - void dxgprocess_ht_lock_exclusive_down(struct dxgprocess *process); -@@ -241,6 +258,7 @@ enum dxgadapter_state { - * This object represents the grapchis 
adapter. - * Objects, which take reference on the adapter: - * - dxgglobal -+ * - dxgdevice - * - adapter handle (struct d3dkmthandle) - */ - struct dxgadapter { -@@ -277,6 +295,38 @@ void dxgadapter_add_process(struct dxgadapter *adapter, - struct dxgprocess_adapter *process_info); - void dxgadapter_remove_process(struct dxgprocess_adapter *process_info); - -+/* -+ * The object represent the device object. -+ * The following objects take reference on the device -+ * - device handle (struct d3dkmthandle) -+ */ -+struct dxgdevice { -+ enum dxgobjectstate object_state; -+ /* Device takes reference on the adapter */ -+ struct dxgadapter *adapter; -+ struct dxgprocess_adapter *adapter_info; -+ struct dxgprocess *process; -+ /* Entry in the DGXPROCESS_ADAPTER device list */ -+ struct list_head device_list_entry; -+ struct kref device_kref; -+ /* Protects destcruction of the device object */ -+ struct rw_semaphore device_lock; -+ /* List of paging queues. Protected by process handle table lock. 
*/ -+ struct list_head pqueue_list_head; -+ struct d3dkmthandle handle; -+ enum d3dkmt_deviceexecution_state execution_state; -+ u32 handle_valid; -+}; -+ -+struct dxgdevice *dxgdevice_create(struct dxgadapter *a, struct dxgprocess *p); -+void dxgdevice_destroy(struct dxgdevice *device); -+void dxgdevice_stop(struct dxgdevice *device); -+void dxgdevice_mark_destroyed(struct dxgdevice *device); -+int dxgdevice_acquire_lock_shared(struct dxgdevice *dev); -+void dxgdevice_release_lock_shared(struct dxgdevice *dev); -+void dxgdevice_release(struct kref *refcount); -+bool dxgdevice_is_active(struct dxgdevice *dev); -+ - long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2); - long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2); - -@@ -313,6 +363,14 @@ int dxgvmb_send_destroy_process(struct d3dkmthandle process); - int dxgvmb_send_open_adapter(struct dxgadapter *adapter); - int dxgvmb_send_close_adapter(struct dxgadapter *adapter); - int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter); -+struct d3dkmthandle dxgvmb_send_create_device(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmt_createdevice *args); -+int dxgvmb_send_destroy_device(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmthandle h); -+int dxgvmb_send_flush_device(struct dxgdevice *device, -+ enum dxgdevice_flushschedulerreason reason); - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -241,6 +241,49 @@ struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process, - return adapter; - } - -+struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process, -+ enum hmgrentry_type t, -+ 
struct d3dkmthandle handle) -+{ -+ struct dxgdevice *device = NULL; -+ void *obj; -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -+ obj = hmgrtable_get_object_by_type(&process->handle_table, t, handle); -+ if (obj) { -+ struct d3dkmthandle device_handle = {}; -+ -+ switch (t) { -+ case HMGRENTRY_TYPE_DXGDEVICE: -+ device = obj; -+ break; -+ default: -+ DXG_ERR("invalid handle type: %d", t); -+ break; -+ } -+ if (device == NULL) -+ device = hmgrtable_get_object_by_type( -+ &process->handle_table, -+ HMGRENTRY_TYPE_DXGDEVICE, -+ device_handle); -+ if (device) -+ if (kref_get_unless_zero(&device->device_kref) == 0) -+ device = NULL; -+ } -+ if (device == NULL) -+ DXG_ERR("device_by_handle failed: %d %x", t, handle.v); -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED); -+ return device; -+} -+ -+struct dxgdevice *dxgprocess_device_by_handle(struct dxgprocess *process, -+ struct d3dkmthandle handle) -+{ -+ return dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGDEVICE, -+ handle); -+} -+ - void dxgprocess_ht_lock_shared_down(struct dxgprocess *process) - { - hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -673,6 +673,86 @@ int dxgvmb_send_get_internal_adapter_info(struct dxgadapter *adapter) - return ret; - } - -+struct d3dkmthandle dxgvmb_send_create_device(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmt_createdevice *args) -+{ -+ int ret; -+ struct dxgkvmb_command_createdevice *command; -+ struct dxgkvmb_command_createdevice_return result = { }; -+ struct dxgvmbusmsg msg; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_CREATEDEVICE, -+ process->host_handle); -+ 
command->flags = args->flags; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) -+ result.device.v = 0; -+ free_message(&msg, process); -+cleanup: -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return result.device; -+} -+ -+int dxgvmb_send_destroy_device(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmthandle h) -+{ -+ int ret; -+ struct dxgkvmb_command_destroydevice *command; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_DESTROYDEVICE, -+ process->host_handle); -+ command->device = h; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_flush_device(struct dxgdevice *device, -+ enum dxgdevice_flushschedulerreason reason) -+{ -+ int ret; -+ struct dxgkvmb_command_flushdevice *command; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ struct dxgprocess *process = device->process; -+ -+ ret = init_message(&msg, device->adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_FLUSHDEVICE, -+ process->host_handle); -+ command->device = device->handle; -+ command->reason = reason; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ 
b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -247,4 +247,26 @@ struct dxgkvmb_command_queryadapterinfo_return { - u8 private_data[1]; - }; - -+struct dxgkvmb_command_createdevice { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_createdeviceflags flags; -+ bool cdd_device; -+ void *error_code; -+}; -+ -+struct dxgkvmb_command_createdevice_return { -+ struct d3dkmthandle device; -+}; -+ -+struct dxgkvmb_command_destroydevice { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+}; -+ -+struct dxgkvmb_command_flushdevice { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ enum dxgdevice_flushschedulerreason reason; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -424,10 +424,136 @@ dxgkio_query_adapter_info(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_create_device(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_createdevice args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ struct d3dkmthandle host_device_handle = {}; -+ bool adapter_locked = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* The call acquires reference on the adapter */ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgdevice_create(adapter, process); -+ if (device == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) -+ goto cleanup; -+ -+ adapter_locked = true; -+ -+ host_device_handle = dxgvmb_send_create_device(adapter, process, &args); -+ if (host_device_handle.v) { -+ ret = 
copy_to_user(&((struct d3dkmt_createdevice *)inargs)-> -+ device, &host_device_handle, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy device handle"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, device, -+ HMGRENTRY_TYPE_DXGDEVICE, -+ host_device_handle); -+ if (ret >= 0) { -+ device->handle = host_device_handle; -+ device->handle_valid = 1; -+ device->object_state = DXGOBJECTSTATE_ACTIVE; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (host_device_handle.v) -+ dxgvmb_send_destroy_device(adapter, process, -+ host_device_handle); -+ if (device) -+ dxgdevice_destroy(device); -+ } -+ -+ if (adapter_locked) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_destroy_device(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_destroydevice args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ device = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGDEVICE, -+ args.device); -+ if (device) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGDEVICE, args.device); -+ device->handle_valid = 0; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (device == NULL) { -+ DXG_ERR("invalid device handle: %x", args.device.v); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ -+ dxgdevice_destroy(device); -+ -+ if (dxgadapter_acquire_lock_shared(adapter) == 0) { -+ 
dxgvmb_send_destroy_device(adapter, process, args.device); -+ dxgadapter_release_lock_shared(adapter); -+ } -+ -+cleanup: -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, --/* 0x02 */ {}, -+/* 0x02 */ {dxgkio_create_device, LX_DXCREATEDEVICE}, - /* 0x03 */ {}, - /* 0x04 */ {}, - /* 0x05 */ {}, -@@ -450,7 +576,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x16 */ {}, - /* 0x17 */ {}, - /* 0x18 */ {}, --/* 0x19 */ {}, -+/* 0x19 */ {dxgkio_destroy_device, LX_DXDESTROYDEVICE}, - /* 0x1a */ {}, - /* 0x1b */ {}, - /* 0x1c */ {}, -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -27,10 +27,10 @@ extern const struct d3dkmthandle zerohandle; - * - * channel_lock (VMBus channel lock) - * fd_mutex -- * plistmutex -- * table_lock -- * core_lock -- * device_lock -+ * plistmutex (process list mutex) -+ * table_lock (handle table lock) -+ * core_lock (dxgadapter lock) -+ * device_lock (dxgdevice lock) - * process_adapter_mutex - * adapter_list_lock - * device_mutex (dxgglobal mutex) -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -86,6 +86,74 @@ struct d3dkmt_openadapterfromluid { - struct d3dkmthandle adapter_handle; - }; - -+struct d3dddi_allocationlist { -+ struct d3dkmthandle allocation; -+ union { -+ struct { -+ __u32 write_operation :1; -+ __u32 do_not_retire_instance :1; -+ __u32 offer_priority :3; -+ __u32 reserved :27; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dddi_patchlocationlist { -+ __u32 allocation_index; -+ union { -+ struct { -+ __u32 slot_id:24; -+ __u32 reserved:8; -+ }; -+ __u32 value; -+ }; -+ __u32 driver_id; -+ __u32 
allocation_offset; -+ __u32 patch_offset; -+ __u32 split_offset; -+}; -+ -+struct d3dkmt_createdeviceflags { -+ __u32 legacy_mode:1; -+ __u32 request_vSync:1; -+ __u32 disable_gpu_timeout:1; -+ __u32 gdi_device:1; -+ __u32 reserved:28; -+}; -+ -+struct d3dkmt_createdevice { -+ struct d3dkmthandle adapter; -+ __u32 reserved3; -+ struct d3dkmt_createdeviceflags flags; -+ struct d3dkmthandle device; -+#ifdef __KERNEL__ -+ void *command_buffer; -+#else -+ __u64 command_buffer; -+#endif -+ __u32 command_buffer_size; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ struct d3dddi_allocationlist *allocation_list; -+#else -+ __u64 allocation_list; -+#endif -+ __u32 allocation_list_size; -+ __u32 reserved1; -+#ifdef __KERNEL__ -+ struct d3dddi_patchlocationlist *patch_location_list; -+#else -+ __u64 patch_location_list; -+#endif -+ __u32 patch_location_list_size; -+ __u32 reserved2; -+}; -+ -+struct d3dkmt_destroydevice { -+ struct d3dkmthandle device; -+}; -+ - struct d3dkmt_adaptertype { - union { - struct { -@@ -125,6 +193,16 @@ struct d3dkmt_queryadapterinfo { - __u32 private_data_size; - }; - -+enum d3dkmt_deviceexecution_state { -+ _D3DKMT_DEVICEEXECUTION_ACTIVE = 1, -+ _D3DKMT_DEVICEEXECUTION_RESET = 2, -+ _D3DKMT_DEVICEEXECUTION_HUNG = 3, -+ _D3DKMT_DEVICEEXECUTION_STOPPED = 4, -+ _D3DKMT_DEVICEEXECUTION_ERROR_OUTOFMEMORY = 5, -+ _D3DKMT_DEVICEEXECUTION_ERROR_DMAFAULT = 6, -+ _D3DKMT_DEVICEEXECUTION_ERROR_DMAPAGEFAULT = 7, -+}; -+ - union d3dkmt_enumadapters_filter { - struct { - __u64 include_compute_only:1; -@@ -152,12 +230,16 @@ struct d3dkmt_enumadapters3 { - - #define LX_DXOPENADAPTERFROMLUID \ - _IOWR(0x47, 0x01, struct d3dkmt_openadapterfromluid) -+#define LX_DXCREATEDEVICE \ -+ _IOWR(0x47, 0x02, struct d3dkmt_createdevice) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) - #define LX_DXENUMADAPTERS2 \ - _IOWR(0x47, 0x14, struct d3dkmt_enumadapters2) - #define LX_DXCLOSEADAPTER \ - _IOWR(0x47, 0x15, struct d3dkmt_closeadapter) 
-+#define LX_DXDESTROYDEVICE \ -+ _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXENUMADAPTERS3 \ - _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) - --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1674-drivers-hv-dxgkrnl-Creation-of-dxgcontext-objects.patch b/patch/kernel/archive/wsl2-arm64-6.6/1674-drivers-hv-dxgkrnl-Creation-of-dxgcontext-objects.patch deleted file mode 100644 index 7e8f50dcb826..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1674-drivers-hv-dxgkrnl-Creation-of-dxgcontext-objects.patch +++ /dev/null @@ -1,668 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 1 Feb 2022 17:03:47 -0800 -Subject: drivers: hv: dxgkrnl: Creation of dxgcontext objects - -Implement ioctls for creation/destruction of dxgcontext -objects: - - the LX_DXCREATECONTEXTVIRTUAL ioctl - - the LX_DXDESTROYCONTEXT ioctl. - -A dxgcontext object represents a compute device execution thread. -Ccompute device DMA buffers and synchronization operations are -submitted for execution to a dxgcontext. dxgcontexts objects -belong to a dxgdevice object. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 103 ++++++ - drivers/hv/dxgkrnl/dxgkrnl.h | 38 +++ - drivers/hv/dxgkrnl/dxgprocess.c | 4 + - drivers/hv/dxgkrnl/dxgvmbus.c | 101 +++++- - drivers/hv/dxgkrnl/dxgvmbus.h | 18 + - drivers/hv/dxgkrnl/ioctl.c | 168 +++++++++- - drivers/hv/dxgkrnl/misc.h | 1 + - include/uapi/misc/d3dkmthk.h | 47 +++ - 8 files changed, 477 insertions(+), 3 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -206,7 +206,9 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - device->adapter = adapter; - device->process = process; - kref_get(&adapter->adapter_kref); -+ INIT_LIST_HEAD(&device->context_list_head); - init_rwsem(&device->device_lock); -+ init_rwsem(&device->context_list_lock); - INIT_LIST_HEAD(&device->pqueue_list_head); - device->object_state = DXGOBJECTSTATE_CREATED; - device->execution_state = _D3DKMT_DEVICEEXECUTION_ACTIVE; -@@ -248,6 +250,20 @@ void dxgdevice_destroy(struct dxgdevice *device) - - dxgdevice_stop(device); - -+ { -+ struct dxgcontext *context; -+ struct dxgcontext *tmp; -+ -+ DXG_TRACE("destroying contexts"); -+ dxgdevice_acquire_context_list_lock(device); -+ list_for_each_entry_safe(context, tmp, -+ &device->context_list_head, -+ context_list_entry) { -+ dxgcontext_destroy(process, context); -+ } -+ dxgdevice_release_context_list_lock(device); -+ } -+ - /* Guest handles need to be released before the host handles */ - hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); - if (device->handle_valid) { -@@ -302,6 +318,32 @@ bool dxgdevice_is_active(struct dxgdevice *device) - return device->object_state == DXGOBJECTSTATE_ACTIVE; - } - -+void dxgdevice_acquire_context_list_lock(struct dxgdevice *device) -+{ -+ down_write(&device->context_list_lock); -+} 
-+ -+void dxgdevice_release_context_list_lock(struct dxgdevice *device) -+{ -+ up_write(&device->context_list_lock); -+} -+ -+void dxgdevice_add_context(struct dxgdevice *device, struct dxgcontext *context) -+{ -+ down_write(&device->context_list_lock); -+ list_add_tail(&context->context_list_entry, &device->context_list_head); -+ up_write(&device->context_list_lock); -+} -+ -+void dxgdevice_remove_context(struct dxgdevice *device, -+ struct dxgcontext *context) -+{ -+ if (context->context_list_entry.next) { -+ list_del(&context->context_list_entry); -+ context->context_list_entry.next = NULL; -+ } -+} -+ - void dxgdevice_release(struct kref *refcount) - { - struct dxgdevice *device; -@@ -310,6 +352,67 @@ void dxgdevice_release(struct kref *refcount) - kfree(device); - } - -+struct dxgcontext *dxgcontext_create(struct dxgdevice *device) -+{ -+ struct dxgcontext *context; -+ -+ context = kzalloc(sizeof(struct dxgcontext), GFP_KERNEL); -+ if (context) { -+ kref_init(&context->context_kref); -+ context->device = device; -+ context->process = device->process; -+ context->device_handle = device->handle; -+ kref_get(&device->device_kref); -+ INIT_LIST_HEAD(&context->hwqueue_list_head); -+ init_rwsem(&context->hwqueue_list_lock); -+ dxgdevice_add_context(device, context); -+ context->object_state = DXGOBJECTSTATE_ACTIVE; -+ } -+ return context; -+} -+ -+/* -+ * Called when the device context list lock is held -+ */ -+void dxgcontext_destroy(struct dxgprocess *process, struct dxgcontext *context) -+{ -+ DXG_TRACE("Destroying context %p", context); -+ context->object_state = DXGOBJECTSTATE_DESTROYED; -+ if (context->device) { -+ if (context->handle.v) { -+ hmgrtable_free_handle_safe(&process->handle_table, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ context->handle); -+ } -+ dxgdevice_remove_context(context->device, context); -+ kref_put(&context->device->device_kref, dxgdevice_release); -+ } -+ kref_put(&context->context_kref, dxgcontext_release); -+} -+ -+void 
dxgcontext_destroy_safe(struct dxgprocess *process, -+ struct dxgcontext *context) -+{ -+ struct dxgdevice *device = context->device; -+ -+ dxgdevice_acquire_context_list_lock(device); -+ dxgcontext_destroy(process, context); -+ dxgdevice_release_context_list_lock(device); -+} -+ -+bool dxgcontext_is_active(struct dxgcontext *context) -+{ -+ return context->object_state == DXGOBJECTSTATE_ACTIVE; -+} -+ -+void dxgcontext_release(struct kref *refcount) -+{ -+ struct dxgcontext *context; -+ -+ context = container_of(refcount, struct dxgcontext, context_kref); -+ kfree(context); -+} -+ - struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - struct dxgadapter *adapter) - { -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -35,6 +35,7 @@ - struct dxgprocess; - struct dxgadapter; - struct dxgdevice; -+struct dxgcontext; - - /* - * Driver private data. -@@ -298,6 +299,7 @@ void dxgadapter_remove_process(struct dxgprocess_adapter *process_info); - /* - * The object represent the device object. - * The following objects take reference on the device -+ * - dxgcontext - * - device handle (struct d3dkmthandle) - */ - struct dxgdevice { -@@ -311,6 +313,8 @@ struct dxgdevice { - struct kref device_kref; - /* Protects destcruction of the device object */ - struct rw_semaphore device_lock; -+ struct rw_semaphore context_list_lock; -+ struct list_head context_list_head; - /* List of paging queues. Protected by process handle table lock. 
*/ - struct list_head pqueue_list_head; - struct d3dkmthandle handle; -@@ -325,7 +329,33 @@ void dxgdevice_mark_destroyed(struct dxgdevice *device); - int dxgdevice_acquire_lock_shared(struct dxgdevice *dev); - void dxgdevice_release_lock_shared(struct dxgdevice *dev); - void dxgdevice_release(struct kref *refcount); -+void dxgdevice_add_context(struct dxgdevice *dev, struct dxgcontext *ctx); -+void dxgdevice_remove_context(struct dxgdevice *dev, struct dxgcontext *ctx); - bool dxgdevice_is_active(struct dxgdevice *dev); -+void dxgdevice_acquire_context_list_lock(struct dxgdevice *dev); -+void dxgdevice_release_context_list_lock(struct dxgdevice *dev); -+ -+/* -+ * The object represent the execution context of a device. -+ */ -+struct dxgcontext { -+ enum dxgobjectstate object_state; -+ struct dxgdevice *device; -+ struct dxgprocess *process; -+ /* entry in the device context list */ -+ struct list_head context_list_entry; -+ struct list_head hwqueue_list_head; -+ struct rw_semaphore hwqueue_list_lock; -+ struct kref context_kref; -+ struct d3dkmthandle handle; -+ struct d3dkmthandle device_handle; -+}; -+ -+struct dxgcontext *dxgcontext_create(struct dxgdevice *dev); -+void dxgcontext_destroy(struct dxgprocess *pr, struct dxgcontext *ctx); -+void dxgcontext_destroy_safe(struct dxgprocess *pr, struct dxgcontext *ctx); -+void dxgcontext_release(struct kref *refcount); -+bool dxgcontext_is_active(struct dxgcontext *ctx); - - long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2); - long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2); -@@ -371,6 +401,14 @@ int dxgvmb_send_destroy_device(struct dxgadapter *adapter, - struct d3dkmthandle h); - int dxgvmb_send_flush_device(struct dxgdevice *device, - enum dxgdevice_flushschedulerreason reason); -+struct d3dkmthandle -+dxgvmb_send_create_context(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmt_createcontextvirtual -+ *args); -+int 
dxgvmb_send_destroy_context(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmthandle h); - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -257,6 +257,10 @@ struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process, - case HMGRENTRY_TYPE_DXGDEVICE: - device = obj; - break; -+ case HMGRENTRY_TYPE_DXGCONTEXT: -+ device_handle = -+ ((struct dxgcontext *)obj)->device_handle; -+ break; - default: - DXG_ERR("invalid handle type: %d", t); - break; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -731,7 +731,7 @@ int dxgvmb_send_flush_device(struct dxgdevice *device, - enum dxgdevice_flushschedulerreason reason) - { - int ret; -- struct dxgkvmb_command_flushdevice *command; -+ struct dxgkvmb_command_flushdevice *command = NULL; - struct dxgvmbusmsg msg = {.hdr = NULL}; - struct dxgprocess *process = device->process; - -@@ -745,6 +745,105 @@ int dxgvmb_send_flush_device(struct dxgdevice *device, - command->device = device->handle; - command->reason = reason; - -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+struct d3dkmthandle -+dxgvmb_send_create_context(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmt_createcontextvirtual *args) -+{ -+ struct dxgkvmb_command_createcontextvirtual *command = NULL; -+ u32 cmd_size; -+ int ret; -+ struct d3dkmthandle context = {}; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ if (args->priv_drv_data_size > 
DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("PrivateDriverDataSize is invalid"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ cmd_size = sizeof(struct dxgkvmb_command_createcontextvirtual) + -+ args->priv_drv_data_size - 1; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CREATECONTEXTVIRTUAL, -+ process->host_handle); -+ command->device = args->device; -+ command->node_ordinal = args->node_ordinal; -+ command->engine_affinity = args->engine_affinity; -+ command->flags = args->flags; -+ command->client_hint = args->client_hint; -+ command->priv_drv_data_size = args->priv_drv_data_size; -+ if (args->priv_drv_data_size) { -+ ret = copy_from_user(command->priv_drv_data, -+ args->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("Faled to copy private data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ /* Input command is returned back as output */ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ command, cmd_size); -+ if (ret < 0) { -+ goto cleanup; -+ } else { -+ context = command->context; -+ if (args->priv_drv_data_size) { -+ ret = copy_to_user(args->priv_drv_data, -+ command->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ dev_err(DXGDEV, -+ "Faled to copy private data to user"); -+ ret = -EINVAL; -+ dxgvmb_send_destroy_context(adapter, process, -+ context); -+ context.v = 0; -+ } -+ } -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return context; -+} -+ -+int dxgvmb_send_destroy_context(struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ struct d3dkmthandle h) -+{ -+ int ret; -+ struct dxgkvmb_command_destroycontext *command; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ 
command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_DESTROYCONTEXT, -+ process->host_handle); -+ command->context = h; -+ - ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); - cleanup: - free_message(&msg, process); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -269,4 +269,22 @@ struct dxgkvmb_command_flushdevice { - enum dxgdevice_flushschedulerreason reason; - }; - -+struct dxgkvmb_command_createcontextvirtual { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle context; -+ struct d3dkmthandle device; -+ u32 node_ordinal; -+ u32 engine_affinity; -+ struct d3dddi_createcontextflags flags; -+ enum d3dkmt_clienthint client_hint; -+ u32 priv_drv_data_size; -+ u8 priv_drv_data[1]; -+}; -+ -+/* The command returns ntstatus */ -+struct dxgkvmb_command_destroycontext { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle context; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -550,13 +550,177 @@ dxgkio_destroy_device(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_create_context_virtual(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_createcontextvirtual args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgcontext *context = NULL; -+ struct d3dkmthandle host_context_handle = {}; -+ bool device_lock_acquired = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* -+ * The call acquires reference on the device. 
It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) -+ goto cleanup; -+ -+ device_lock_acquired = true; -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ context = dxgcontext_create(device); -+ if (context == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ host_context_handle = dxgvmb_send_create_context(adapter, -+ process, &args); -+ if (host_context_handle.v) { -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, context, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ host_context_handle); -+ if (ret >= 0) -+ context->handle = host_context_handle; -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ if (ret < 0) -+ goto cleanup; -+ ret = copy_to_user(&((struct d3dkmt_createcontextvirtual *) -+ inargs)->context, &host_context_handle, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy context handle"); -+ ret = -EINVAL; -+ } -+ } else { -+ DXG_ERR("invalid host handle"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (host_context_handle.v) { -+ dxgvmb_send_destroy_context(adapter, process, -+ host_context_handle); -+ } -+ if (context) -+ dxgcontext_destroy_safe(process, context); -+ } -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) { -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_destroy_context(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_destroycontext args; -+ int ret; -+ struct dxgadapter *adapter = NULL; 
-+ struct dxgcontext *context = NULL; -+ struct dxgdevice *device = NULL; -+ struct d3dkmthandle device_handle = {}; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ context = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (context) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGCONTEXT, args.context); -+ context->handle.v = 0; -+ device_handle = context->device_handle; -+ context->object_state = DXGOBJECTSTATE_DESTROYED; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (context == NULL) { -+ DXG_ERR("invalid context handle: %x", args.context.v); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. 
-+ */ -+ device = dxgprocess_device_by_handle(process, device_handle); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_destroy_context(adapter, process, args.context); -+ -+ dxgcontext_destroy_safe(process, context); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %s %d", errorstr(ret), __func__, ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, - /* 0x02 */ {dxgkio_create_device, LX_DXCREATEDEVICE}, - /* 0x03 */ {}, --/* 0x04 */ {}, --/* 0x05 */ {}, -+/* 0x04 */ {dxgkio_create_context_virtual, LX_DXCREATECONTEXTVIRTUAL}, -+/* 0x05 */ {dxgkio_destroy_context, LX_DXDESTROYCONTEXT}, - /* 0x06 */ {}, - /* 0x07 */ {}, - /* 0x08 */ {}, -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -29,6 +29,7 @@ extern const struct d3dkmthandle zerohandle; - * fd_mutex - * plistmutex (process list mutex) - * table_lock (handle table lock) -+ * context_list_lock - * core_lock (dxgadapter lock) - * device_lock (dxgdevice lock) - * process_adapter_mutex -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -154,6 +154,49 @@ struct d3dkmt_destroydevice { - struct d3dkmthandle device; - }; - -+enum d3dkmt_clienthint { -+ _D3DKMT_CLIENTHNT_UNKNOWN = 0, -+ _D3DKMT_CLIENTHINT_OPENGL = 1, -+ _D3DKMT_CLIENTHINT_CDD = 2, -+ _D3DKMT_CLIENTHINT_DX7 = 7, -+ _D3DKMT_CLIENTHINT_DX8 = 8, -+ _D3DKMT_CLIENTHINT_DX9 = 9, -+ 
_D3DKMT_CLIENTHINT_DX10 = 10, -+}; -+ -+struct d3dddi_createcontextflags { -+ union { -+ struct { -+ __u32 null_rendering:1; -+ __u32 initial_data:1; -+ __u32 disable_gpu_timeout:1; -+ __u32 synchronization_only:1; -+ __u32 hw_queue_supported:1; -+ __u32 reserved:27; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_destroycontext { -+ struct d3dkmthandle context; -+}; -+ -+struct d3dkmt_createcontextvirtual { -+ struct d3dkmthandle device; -+ __u32 node_ordinal; -+ __u32 engine_affinity; -+ struct d3dddi_createcontextflags flags; -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ __u32 priv_drv_data_size; -+ enum d3dkmt_clienthint client_hint; -+ struct d3dkmthandle context; -+}; -+ - struct d3dkmt_adaptertype { - union { - struct { -@@ -232,6 +275,10 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x01, struct d3dkmt_openadapterfromluid) - #define LX_DXCREATEDEVICE \ - _IOWR(0x47, 0x02, struct d3dkmt_createdevice) -+#define LX_DXCREATECONTEXTVIRTUAL \ -+ _IOWR(0x47, 0x04, struct d3dkmt_createcontextvirtual) -+#define LX_DXDESTROYCONTEXT \ -+ _IOWR(0x47, 0x05, struct d3dkmt_destroycontext) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) - #define LX_DXENUMADAPTERS2 \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1675-drivers-hv-dxgkrnl-Creation-of-compute-device-allocations-and-resources.patch b/patch/kernel/archive/wsl2-arm64-6.6/1675-drivers-hv-dxgkrnl-Creation-of-compute-device-allocations-and-resources.patch deleted file mode 100644 index 9e0f2e4175a6..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1675-drivers-hv-dxgkrnl-Creation-of-compute-device-allocations-and-resources.patch +++ /dev/null @@ -1,2263 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 1 Feb 2022 15:37:52 -0800 -Subject: drivers: hv: dxgkrnl: Creation of compute device allocations and - resources - -Implemented ioctls to create 
and destroy virtual compute device -allocations (dxgallocation) and resources (dxgresource): - - the LX_DXCREATEALLOCATION ioctl, - - the LX_DXDESTROYALLOCATION2 ioctl. - -Compute device allocations (dxgallocation objects) represent memory -allocation, which could be accessible by the device. Allocations can -be created around existing system memory (provided by an application) -or memory, allocated by dxgkrnl on the host. - -Compute device resources (dxgresource objects) represent containers of -compute device allocations. Allocations could be dynamically added, -removed from a resource. - -Each allocation/resource has associated driver private data, which -is provided during creation. - -Each created resource or allocation have a handle (d3dkmthandle), -which is used to reference the corresponding object in other ioctls. - -A dxgallocation can be resident (meaning that it is accessible by -the compute device) or evicted. When an allocation is evicted, -its content is stored in the backing store in system memory. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 282 ++++ - drivers/hv/dxgkrnl/dxgkrnl.h | 113 ++ - drivers/hv/dxgkrnl/dxgmodule.c | 1 + - drivers/hv/dxgkrnl/dxgvmbus.c | 649 ++++++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 123 ++ - drivers/hv/dxgkrnl/ioctl.c | 631 ++++++++- - drivers/hv/dxgkrnl/misc.h | 3 + - include/uapi/misc/d3dkmthk.h | 204 +++ - 8 files changed, 2004 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -207,8 +207,11 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - device->process = process; - kref_get(&adapter->adapter_kref); - INIT_LIST_HEAD(&device->context_list_head); -+ INIT_LIST_HEAD(&device->alloc_list_head); -+ INIT_LIST_HEAD(&device->resource_list_head); - init_rwsem(&device->device_lock); - init_rwsem(&device->context_list_lock); -+ init_rwsem(&device->alloc_list_lock); - INIT_LIST_HEAD(&device->pqueue_list_head); - device->object_state = DXGOBJECTSTATE_CREATED; - device->execution_state = _D3DKMT_DEVICEEXECUTION_ACTIVE; -@@ -224,6 +227,14 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - - void dxgdevice_stop(struct dxgdevice *device) - { -+ struct dxgallocation *alloc; -+ -+ DXG_TRACE("Destroying device: %p", device); -+ dxgdevice_acquire_alloc_list_lock(device); -+ list_for_each_entry(alloc, &device->alloc_list_head, alloc_list_entry) { -+ dxgallocation_stop(alloc); -+ } -+ dxgdevice_release_alloc_list_lock(device); - } - - void dxgdevice_mark_destroyed(struct dxgdevice *device) -@@ -250,6 +261,33 @@ void dxgdevice_destroy(struct dxgdevice *device) - - dxgdevice_stop(device); - -+ dxgdevice_acquire_alloc_list_lock(device); -+ -+ { -+ struct dxgallocation *alloc; -+ struct dxgallocation *tmp; -+ -+ DXG_TRACE("destroying allocations"); -+ 
list_for_each_entry_safe(alloc, tmp, &device->alloc_list_head, -+ alloc_list_entry) { -+ dxgallocation_destroy(alloc); -+ } -+ } -+ -+ { -+ struct dxgresource *resource; -+ struct dxgresource *tmp; -+ -+ DXG_TRACE("destroying resources"); -+ list_for_each_entry_safe(resource, tmp, -+ &device->resource_list_head, -+ resource_list_entry) { -+ dxgresource_destroy(resource); -+ } -+ } -+ -+ dxgdevice_release_alloc_list_lock(device); -+ - { - struct dxgcontext *context; - struct dxgcontext *tmp; -@@ -328,6 +366,26 @@ void dxgdevice_release_context_list_lock(struct dxgdevice *device) - up_write(&device->context_list_lock); - } - -+void dxgdevice_acquire_alloc_list_lock(struct dxgdevice *device) -+{ -+ down_write(&device->alloc_list_lock); -+} -+ -+void dxgdevice_release_alloc_list_lock(struct dxgdevice *device) -+{ -+ up_write(&device->alloc_list_lock); -+} -+ -+void dxgdevice_acquire_alloc_list_lock_shared(struct dxgdevice *device) -+{ -+ down_read(&device->alloc_list_lock); -+} -+ -+void dxgdevice_release_alloc_list_lock_shared(struct dxgdevice *device) -+{ -+ up_read(&device->alloc_list_lock); -+} -+ - void dxgdevice_add_context(struct dxgdevice *device, struct dxgcontext *context) - { - down_write(&device->context_list_lock); -@@ -344,6 +402,161 @@ void dxgdevice_remove_context(struct dxgdevice *device, - } - } - -+void dxgdevice_add_alloc(struct dxgdevice *device, struct dxgallocation *alloc) -+{ -+ dxgdevice_acquire_alloc_list_lock(device); -+ list_add_tail(&alloc->alloc_list_entry, &device->alloc_list_head); -+ kref_get(&device->device_kref); -+ alloc->owner.device = device; -+ dxgdevice_release_alloc_list_lock(device); -+} -+ -+void dxgdevice_remove_alloc(struct dxgdevice *device, -+ struct dxgallocation *alloc) -+{ -+ if (alloc->alloc_list_entry.next) { -+ list_del(&alloc->alloc_list_entry); -+ alloc->alloc_list_entry.next = NULL; -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+} -+ -+void dxgdevice_remove_alloc_safe(struct dxgdevice *device, -+ 
struct dxgallocation *alloc) -+{ -+ dxgdevice_acquire_alloc_list_lock(device); -+ dxgdevice_remove_alloc(device, alloc); -+ dxgdevice_release_alloc_list_lock(device); -+} -+ -+void dxgdevice_add_resource(struct dxgdevice *device, struct dxgresource *res) -+{ -+ dxgdevice_acquire_alloc_list_lock(device); -+ list_add_tail(&res->resource_list_entry, &device->resource_list_head); -+ kref_get(&device->device_kref); -+ dxgdevice_release_alloc_list_lock(device); -+} -+ -+void dxgdevice_remove_resource(struct dxgdevice *device, -+ struct dxgresource *res) -+{ -+ if (res->resource_list_entry.next) { -+ list_del(&res->resource_list_entry); -+ res->resource_list_entry.next = NULL; -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+} -+ -+struct dxgresource *dxgresource_create(struct dxgdevice *device) -+{ -+ struct dxgresource *resource; -+ -+ resource = kzalloc(sizeof(struct dxgresource), GFP_KERNEL); -+ if (resource) { -+ kref_init(&resource->resource_kref); -+ resource->device = device; -+ resource->process = device->process; -+ resource->object_state = DXGOBJECTSTATE_ACTIVE; -+ mutex_init(&resource->resource_mutex); -+ INIT_LIST_HEAD(&resource->alloc_list_head); -+ dxgdevice_add_resource(device, resource); -+ } -+ return resource; -+} -+ -+void dxgresource_free_handle(struct dxgresource *resource) -+{ -+ struct dxgallocation *alloc; -+ struct dxgprocess *process; -+ -+ if (resource->handle_valid) { -+ process = resource->device->process; -+ hmgrtable_free_handle_safe(&process->handle_table, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ resource->handle); -+ resource->handle_valid = 0; -+ } -+ list_for_each_entry(alloc, &resource->alloc_list_head, -+ alloc_list_entry) { -+ dxgallocation_free_handle(alloc); -+ } -+} -+ -+void dxgresource_destroy(struct dxgresource *resource) -+{ -+ /* device->alloc_list_lock is held */ -+ struct dxgallocation *alloc; -+ struct dxgallocation *tmp; -+ struct d3dkmt_destroyallocation2 args = { }; -+ int destroyed = test_and_set_bit(0, 
&resource->flags); -+ struct dxgdevice *device = resource->device; -+ -+ if (!destroyed) { -+ dxgresource_free_handle(resource); -+ if (resource->handle.v) { -+ args.device = device->handle; -+ args.resource = resource->handle; -+ dxgvmb_send_destroy_allocation(device->process, -+ device, &args, NULL); -+ resource->handle.v = 0; -+ } -+ list_for_each_entry_safe(alloc, tmp, &resource->alloc_list_head, -+ alloc_list_entry) { -+ dxgallocation_destroy(alloc); -+ } -+ dxgdevice_remove_resource(device, resource); -+ } -+ kref_put(&resource->resource_kref, dxgresource_release); -+} -+ -+void dxgresource_release(struct kref *refcount) -+{ -+ struct dxgresource *resource; -+ -+ resource = container_of(refcount, struct dxgresource, resource_kref); -+ kfree(resource); -+} -+ -+bool dxgresource_is_active(struct dxgresource *resource) -+{ -+ return resource->object_state == DXGOBJECTSTATE_ACTIVE; -+} -+ -+int dxgresource_add_alloc(struct dxgresource *resource, -+ struct dxgallocation *alloc) -+{ -+ int ret = -ENODEV; -+ struct dxgdevice *device = resource->device; -+ -+ dxgdevice_acquire_alloc_list_lock(device); -+ if (dxgresource_is_active(resource)) { -+ list_add_tail(&alloc->alloc_list_entry, -+ &resource->alloc_list_head); -+ alloc->owner.resource = resource; -+ ret = 0; -+ } -+ alloc->resource_owner = 1; -+ dxgdevice_release_alloc_list_lock(device); -+ return ret; -+} -+ -+void dxgresource_remove_alloc(struct dxgresource *resource, -+ struct dxgallocation *alloc) -+{ -+ if (alloc->alloc_list_entry.next) { -+ list_del(&alloc->alloc_list_entry); -+ alloc->alloc_list_entry.next = NULL; -+ } -+} -+ -+void dxgresource_remove_alloc_safe(struct dxgresource *resource, -+ struct dxgallocation *alloc) -+{ -+ dxgdevice_acquire_alloc_list_lock(resource->device); -+ dxgresource_remove_alloc(resource, alloc); -+ dxgdevice_release_alloc_list_lock(resource->device); -+} -+ - void dxgdevice_release(struct kref *refcount) - { - struct dxgdevice *device; -@@ -413,6 +626,75 @@ void 
dxgcontext_release(struct kref *refcount) - kfree(context); - } - -+struct dxgallocation *dxgallocation_create(struct dxgprocess *process) -+{ -+ struct dxgallocation *alloc; -+ -+ alloc = kzalloc(sizeof(struct dxgallocation), GFP_KERNEL); -+ if (alloc) -+ alloc->process = process; -+ return alloc; -+} -+ -+void dxgallocation_stop(struct dxgallocation *alloc) -+{ -+ if (alloc->pages) { -+ release_pages(alloc->pages, alloc->num_pages); -+ vfree(alloc->pages); -+ alloc->pages = NULL; -+ } -+} -+ -+void dxgallocation_free_handle(struct dxgallocation *alloc) -+{ -+ dxgprocess_ht_lock_exclusive_down(alloc->process); -+ if (alloc->handle_valid) { -+ hmgrtable_free_handle(&alloc->process->handle_table, -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ alloc->alloc_handle); -+ alloc->handle_valid = 0; -+ } -+ dxgprocess_ht_lock_exclusive_up(alloc->process); -+} -+ -+void dxgallocation_destroy(struct dxgallocation *alloc) -+{ -+ struct dxgprocess *process = alloc->process; -+ struct d3dkmt_destroyallocation2 args = { }; -+ -+ dxgallocation_stop(alloc); -+ if (alloc->resource_owner) -+ dxgresource_remove_alloc(alloc->owner.resource, alloc); -+ else if (alloc->owner.device) -+ dxgdevice_remove_alloc(alloc->owner.device, alloc); -+ dxgallocation_free_handle(alloc); -+ if (alloc->alloc_handle.v && !alloc->resource_owner) { -+ args.device = alloc->owner.device->handle; -+ args.alloc_count = 1; -+ dxgvmb_send_destroy_allocation(process, -+ alloc->owner.device, -+ &args, &alloc->alloc_handle); -+ } -+#ifdef _MAIN_KERNEL_ -+ if (alloc->gpadl.gpadl_handle) { -+ DXG_TRACE("Teardown gpadl %d", -+ alloc->gpadl.gpadl_handle); -+ vmbus_teardown_gpadl(dxgglobal_get_vmbus(), &alloc->gpadl); -+ alloc->gpadl.gpadl_handle = 0; -+ } -+else -+ if (alloc->gpadl) { -+ DXG_TRACE("Teardown gpadl %d", -+ alloc->gpadl); -+ vmbus_teardown_gpadl(dxgglobal_get_vmbus(), alloc->gpadl); -+ alloc->gpadl = 0; -+ } -+#endif -+ if (alloc->priv_drv_data) -+ vfree(alloc->priv_drv_data); -+ kfree(alloc); -+} -+ - struct 
dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - struct dxgadapter *adapter) - { -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -36,6 +36,8 @@ struct dxgprocess; - struct dxgadapter; - struct dxgdevice; - struct dxgcontext; -+struct dxgallocation; -+struct dxgresource; - - /* - * Driver private data. -@@ -269,6 +271,8 @@ struct dxgadapter { - struct list_head adapter_list_entry; - /* The list of dxgprocess_adapter entries */ - struct list_head adapter_process_list_head; -+ /* This lock protects shared resource and syncobject lists */ -+ struct rw_semaphore shared_resource_list_lock; - struct pci_dev *pci_dev; - struct hv_device *hv_dev; - struct dxgvmbuschannel channel; -@@ -315,6 +319,10 @@ struct dxgdevice { - struct rw_semaphore device_lock; - struct rw_semaphore context_list_lock; - struct list_head context_list_head; -+ /* List of device allocations */ -+ struct rw_semaphore alloc_list_lock; -+ struct list_head alloc_list_head; -+ struct list_head resource_list_head; - /* List of paging queues. Protected by process handle table lock. 
*/ - struct list_head pqueue_list_head; - struct d3dkmthandle handle; -@@ -331,9 +339,19 @@ void dxgdevice_release_lock_shared(struct dxgdevice *dev); - void dxgdevice_release(struct kref *refcount); - void dxgdevice_add_context(struct dxgdevice *dev, struct dxgcontext *ctx); - void dxgdevice_remove_context(struct dxgdevice *dev, struct dxgcontext *ctx); -+void dxgdevice_add_alloc(struct dxgdevice *dev, struct dxgallocation *a); -+void dxgdevice_remove_alloc(struct dxgdevice *dev, struct dxgallocation *a); -+void dxgdevice_remove_alloc_safe(struct dxgdevice *dev, -+ struct dxgallocation *a); -+void dxgdevice_add_resource(struct dxgdevice *dev, struct dxgresource *res); -+void dxgdevice_remove_resource(struct dxgdevice *dev, struct dxgresource *res); - bool dxgdevice_is_active(struct dxgdevice *dev); - void dxgdevice_acquire_context_list_lock(struct dxgdevice *dev); - void dxgdevice_release_context_list_lock(struct dxgdevice *dev); -+void dxgdevice_acquire_alloc_list_lock(struct dxgdevice *dev); -+void dxgdevice_release_alloc_list_lock(struct dxgdevice *dev); -+void dxgdevice_acquire_alloc_list_lock_shared(struct dxgdevice *dev); -+void dxgdevice_release_alloc_list_lock_shared(struct dxgdevice *dev); - - /* - * The object represent the execution context of a device. 
-@@ -357,6 +375,83 @@ void dxgcontext_destroy_safe(struct dxgprocess *pr, struct dxgcontext *ctx); - void dxgcontext_release(struct kref *refcount); - bool dxgcontext_is_active(struct dxgcontext *ctx); - -+struct dxgresource { -+ struct kref resource_kref; -+ enum dxgobjectstate object_state; -+ struct d3dkmthandle handle; -+ struct list_head alloc_list_head; -+ struct list_head resource_list_entry; -+ struct list_head shared_resource_list_entry; -+ struct dxgdevice *device; -+ struct dxgprocess *process; -+ /* Protects adding allocations to resource and resource destruction */ -+ struct mutex resource_mutex; -+ u64 private_runtime_handle; -+ union { -+ struct { -+ u32 destroyed:1; /* Must be the first */ -+ u32 handle_valid:1; -+ u32 reserved:30; -+ }; -+ long flags; -+ }; -+}; -+ -+struct dxgresource *dxgresource_create(struct dxgdevice *dev); -+void dxgresource_destroy(struct dxgresource *res); -+void dxgresource_free_handle(struct dxgresource *res); -+void dxgresource_release(struct kref *refcount); -+int dxgresource_add_alloc(struct dxgresource *res, -+ struct dxgallocation *a); -+void dxgresource_remove_alloc(struct dxgresource *res, struct dxgallocation *a); -+void dxgresource_remove_alloc_safe(struct dxgresource *res, -+ struct dxgallocation *a); -+bool dxgresource_is_active(struct dxgresource *res); -+ -+struct privdata { -+ u32 data_size; -+ u8 data[1]; -+}; -+ -+struct dxgallocation { -+ /* Entry in the device list or resource list (when resource exists) */ -+ struct list_head alloc_list_entry; -+ /* Allocation owner */ -+ union { -+ struct dxgdevice *device; -+ struct dxgresource *resource; -+ } owner; -+ struct dxgprocess *process; -+ /* Pointer to private driver data desc. Used for shared resources */ -+ struct privdata *priv_drv_data; -+ struct d3dkmthandle alloc_handle; -+ /* Set to 1 when allocation belongs to resource. 
*/ -+ u32 resource_owner:1; -+ /* Set to 1 when the allocatio is mapped as cached */ -+ u32 cached:1; -+ u32 handle_valid:1; -+ /* GPADL address list for existing sysmem allocations */ -+#ifdef _MAIN_KERNEL_ -+ struct vmbus_gpadl gpadl; -+#else -+ u32 gpadl; -+#endif -+ /* Number of pages in the 'pages' array */ -+ u32 num_pages; -+ /* -+ * CPU address from the existing sysmem allocation, or -+ * mapped to the CPU visible backing store in the IO space -+ */ -+ void *cpu_address; -+ /* Describes pages for the existing sysmem allocation */ -+ struct page **pages; -+}; -+ -+struct dxgallocation *dxgallocation_create(struct dxgprocess *process); -+void dxgallocation_stop(struct dxgallocation *a); -+void dxgallocation_destroy(struct dxgallocation *a); -+void dxgallocation_free_handle(struct dxgallocation *a); -+ - long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2); - long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2); - -@@ -409,9 +504,27 @@ dxgvmb_send_create_context(struct dxgadapter *adapter, - int dxgvmb_send_destroy_context(struct dxgadapter *adapter, - struct dxgprocess *process, - struct d3dkmthandle h); -+int dxgvmb_send_create_allocation(struct dxgprocess *pr, struct dxgdevice *dev, -+ struct d3dkmt_createallocation *args, -+ struct d3dkmt_createallocation *__user inargs, -+ struct dxgresource *res, -+ struct dxgallocation **allocs, -+ struct d3dddi_allocationinfo2 *alloc_info, -+ struct d3dkmt_createstandardallocation *stda); -+int dxgvmb_send_destroy_allocation(struct dxgprocess *pr, struct dxgdevice *dev, -+ struct d3dkmt_destroyallocation2 *args, -+ struct d3dkmthandle *alloc_handles); - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -+int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, -+ enum d3dkmdt_standardallocationtype t, -+ struct d3dkmdt_gdisurfacedata *data, -+ u32 physical_adapter_index, -+ u32 
*alloc_priv_driver_size, -+ void *prive_alloc_data, -+ u32 *res_priv_data_size, -+ void *priv_res_data); - int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel, - void *command, - u32 cmd_size); -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -162,6 +162,7 @@ int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, - init_rwsem(&adapter->core_lock); - - INIT_LIST_HEAD(&adapter->adapter_process_list_head); -+ init_rwsem(&adapter->shared_resource_list_lock); - adapter->pci_dev = dev; - guid_to_luid(guid, &adapter->luid); - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -111,6 +111,41 @@ static int init_message(struct dxgvmbusmsg *msg, struct dxgadapter *adapter, - return 0; - } - -+static int init_message_res(struct dxgvmbusmsgres *msg, -+ struct dxgadapter *adapter, -+ struct dxgprocess *process, -+ u32 size, -+ u32 result_size) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ bool use_ext_header = dxgglobal->vmbus_ver >= -+ DXGK_VMBUS_INTERFACE_VERSION; -+ -+ if (use_ext_header) -+ size += sizeof(struct dxgvmb_ext_header); -+ msg->size = size; -+ msg->res_size += (result_size + 7) & ~7; -+ size += msg->res_size; -+ msg->hdr = vzalloc(size); -+ if (msg->hdr == NULL) { -+ DXG_ERR("Failed to allocate VM bus message: %d", size); -+ return -ENOMEM; -+ } -+ if (use_ext_header) { -+ msg->msg = (char *)&msg->hdr[1]; -+ msg->hdr->command_offset = sizeof(msg->hdr[0]); -+ msg->hdr->vgpu_luid = adapter->host_vgpu_luid; -+ } else { -+ msg->msg = (char *)msg->hdr; -+ } -+ msg->res = (char *)msg->hdr + msg->size; -+ if (dxgglobal->async_msg_enabled) -+ msg->channel = &dxgglobal->channel; -+ else -+ msg->channel = &adapter->channel; -+ return 0; -+} -+ - static void 
free_message(struct dxgvmbusmsg *msg, struct dxgprocess *process) - { - if (msg->hdr && (char *)msg->hdr != msg->msg_on_stack) -@@ -852,6 +887,620 @@ int dxgvmb_send_destroy_context(struct dxgadapter *adapter, - return ret; - } - -+static int -+copy_private_data(struct d3dkmt_createallocation *args, -+ struct dxgkvmb_command_createallocation *command, -+ struct d3dddi_allocationinfo2 *input_alloc_info, -+ struct d3dkmt_createstandardallocation *standard_alloc) -+{ -+ struct dxgkvmb_command_createallocation_allocinfo *alloc_info; -+ struct d3dddi_allocationinfo2 *input_alloc; -+ int ret = 0; -+ int i; -+ u8 *private_data_dest = (u8 *) &command[1] + -+ (args->alloc_count * -+ sizeof(struct dxgkvmb_command_createallocation_allocinfo)); -+ -+ if (args->private_runtime_data_size) { -+ ret = copy_from_user(private_data_dest, -+ args->private_runtime_data, -+ args->private_runtime_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy runtime data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ private_data_dest += args->private_runtime_data_size; -+ } -+ -+ if (args->flags.standard_allocation) { -+ DXG_TRACE("private data offset %d", -+ (u32) (private_data_dest - (u8 *) command)); -+ -+ args->priv_drv_data_size = sizeof(*args->standard_allocation); -+ memcpy(private_data_dest, standard_alloc, -+ sizeof(*standard_alloc)); -+ private_data_dest += args->priv_drv_data_size; -+ } else if (args->priv_drv_data_size) { -+ ret = copy_from_user(private_data_dest, -+ args->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy private data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ private_data_dest += args->priv_drv_data_size; -+ } -+ -+ alloc_info = (void *)&command[1]; -+ input_alloc = input_alloc_info; -+ if (input_alloc_info[0].sysmem) -+ command->flags.existing_sysmem = 1; -+ for (i = 0; i < args->alloc_count; i++) { -+ alloc_info->flags = input_alloc->flags.value; -+ alloc_info->vidpn_source_id = input_alloc->vidpn_source_id; -+ 
alloc_info->priv_drv_data_size = -+ input_alloc->priv_drv_data_size; -+ if (input_alloc->priv_drv_data_size) { -+ ret = copy_from_user(private_data_dest, -+ input_alloc->priv_drv_data, -+ input_alloc->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ private_data_dest += input_alloc->priv_drv_data_size; -+ } -+ alloc_info++; -+ input_alloc++; -+ } -+ -+cleanup: -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+static -+int create_existing_sysmem(struct dxgdevice *device, -+ struct dxgkvmb_command_allocinfo_return *host_alloc, -+ struct dxgallocation *dxgalloc, -+ bool read_only, -+ const void *sysmem) -+{ -+ int ret1 = 0; -+ void *kmem = NULL; -+ int ret = 0; -+ struct dxgkvmb_command_setexistingsysmemstore *set_store_command; -+ u64 alloc_size = host_alloc->allocation_size; -+ u32 npages = alloc_size >> PAGE_SHIFT; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, device->adapter, device->process, -+ sizeof(*set_store_command)); -+ if (ret) -+ goto cleanup; -+ set_store_command = (void *)msg.msg; -+ -+ /* -+ * Create a guest physical address list and set it as the allocation -+ * backing store in the host. This is done after creating the host -+ * allocation, because only now the allocation size is known. 
-+ */ -+ -+ DXG_TRACE("Alloc size: %lld", alloc_size); -+ -+ dxgalloc->cpu_address = (void *)sysmem; -+ dxgalloc->pages = vzalloc(npages * sizeof(void *)); -+ if (dxgalloc->pages == NULL) { -+ DXG_ERR("failed to allocate pages"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret1 = get_user_pages_fast((unsigned long)sysmem, npages, !read_only, -+ dxgalloc->pages); -+ if (ret1 != npages) { -+ DXG_ERR("get_user_pages_fast failed: %d", ret1); -+ if (ret1 > 0 && ret1 < npages) -+ release_pages(dxgalloc->pages, ret1); -+ vfree(dxgalloc->pages); -+ dxgalloc->pages = NULL; -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ kmem = vmap(dxgalloc->pages, npages, VM_MAP, PAGE_KERNEL); -+ if (kmem == NULL) { -+ DXG_ERR("vmap failed"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret1 = vmbus_establish_gpadl(dxgglobal_get_vmbus(), kmem, -+ alloc_size, &dxgalloc->gpadl); -+ if (ret1) { -+ DXG_ERR("establish_gpadl failed: %d", ret1); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ DXG_TRACE("New gpadl %d", dxgalloc->gpadl.gpadl_handle); -+ -+ command_vgpu_to_host_init2(&set_store_command->hdr, -+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE, -+ device->process->host_handle); -+ set_store_command->device = device->handle; -+ set_store_command->device = device->handle; -+ set_store_command->allocation = host_alloc->allocation; -+#ifdef _MAIN_KERNEL_ -+ set_store_command->gpadl = dxgalloc->gpadl.gpadl_handle; -+#else -+ set_store_command->gpadl = dxgalloc->gpadl; -+#endif -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ if (ret < 0) -+ DXG_ERR("failed to set existing store: %x", ret); -+ -+cleanup: -+ if (kmem) -+ vunmap(kmem); -+ free_message(&msg, device->process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+static int -+process_allocation_handles(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct d3dkmt_createallocation *args, -+ struct dxgkvmb_command_createallocation_return *res, -+ struct dxgallocation **dxgalloc, -+ struct dxgresource 
*resource) -+{ -+ int ret = 0; -+ int i; -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ if (args->flags.create_resource) { -+ ret = hmgrtable_assign_handle(&process->handle_table, resource, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ res->resource); -+ if (ret < 0) { -+ DXG_ERR("failed to assign resource handle %x", -+ res->resource.v); -+ } else { -+ resource->handle = res->resource; -+ resource->handle_valid = 1; -+ } -+ } -+ for (i = 0; i < args->alloc_count; i++) { -+ struct dxgkvmb_command_allocinfo_return *host_alloc; -+ -+ host_alloc = &res->allocation_info[i]; -+ ret = hmgrtable_assign_handle(&process->handle_table, -+ dxgalloc[i], -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ host_alloc->allocation); -+ if (ret < 0) { -+ DXG_ERR("failed assign alloc handle %x %d %d", -+ host_alloc->allocation.v, -+ args->alloc_count, i); -+ break; -+ } -+ dxgalloc[i]->alloc_handle = host_alloc->allocation; -+ dxgalloc[i]->handle_valid = 1; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+static int -+create_local_allocations(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct d3dkmt_createallocation *args, -+ struct d3dkmt_createallocation *__user input_args, -+ struct d3dddi_allocationinfo2 *alloc_info, -+ struct dxgkvmb_command_createallocation_return *result, -+ struct dxgresource *resource, -+ struct dxgallocation **dxgalloc, -+ u32 destroy_buffer_size) -+{ -+ int i; -+ int alloc_count = args->alloc_count; -+ u8 *alloc_private_data = NULL; -+ int ret = 0; -+ int ret1; -+ struct dxgkvmb_command_destroyallocation *destroy_buf; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, device->adapter, process, -+ destroy_buffer_size); -+ if (ret) -+ goto cleanup; -+ destroy_buf = (void *)msg.msg; -+ -+ /* Prepare the command to destroy allocation in case of failure */ -+ command_vgpu_to_host_init2(&destroy_buf->hdr, -+ DXGK_VMBCOMMAND_DESTROYALLOCATION, -+ 
process->host_handle); -+ destroy_buf->device = args->device; -+ destroy_buf->resource = args->resource; -+ destroy_buf->alloc_count = alloc_count; -+ destroy_buf->flags.assume_not_in_use = 1; -+ for (i = 0; i < alloc_count; i++) { -+ DXG_TRACE("host allocation: %d %x", -+ i, result->allocation_info[i].allocation.v); -+ destroy_buf->allocations[i] = -+ result->allocation_info[i].allocation; -+ } -+ -+ if (args->flags.create_resource) { -+ DXG_TRACE("new resource: %x", result->resource.v); -+ ret = copy_to_user(&input_args->resource, &result->resource, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy resource handle"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ alloc_private_data = (u8 *) result + -+ sizeof(struct dxgkvmb_command_createallocation_return) + -+ sizeof(struct dxgkvmb_command_allocinfo_return) * (alloc_count - 1); -+ -+ for (i = 0; i < alloc_count; i++) { -+ struct dxgkvmb_command_allocinfo_return *host_alloc; -+ struct d3dddi_allocationinfo2 *user_alloc; -+ -+ host_alloc = &result->allocation_info[i]; -+ user_alloc = &alloc_info[i]; -+ dxgalloc[i]->num_pages = -+ host_alloc->allocation_size >> PAGE_SHIFT; -+ if (user_alloc->sysmem) { -+ ret = create_existing_sysmem(device, host_alloc, -+ dxgalloc[i], -+ args->flags.read_only != 0, -+ user_alloc->sysmem); -+ if (ret < 0) -+ goto cleanup; -+ } -+ dxgalloc[i]->cached = host_alloc->allocation_flags.cached; -+ if (host_alloc->priv_drv_data_size) { -+ ret = copy_to_user(user_alloc->priv_drv_data, -+ alloc_private_data, -+ host_alloc->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy private data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ alloc_private_data += host_alloc->priv_drv_data_size; -+ } -+ ret = copy_to_user(&args->allocation_info[i].allocation, -+ &host_alloc->allocation, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handle"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ ret = process_allocation_handles(process, 
device, args, result, -+ dxgalloc, resource); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(&input_args->global_share, &args->global_share, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy global share"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ /* Free local handles before freeing the handles in the host */ -+ dxgdevice_acquire_alloc_list_lock(device); -+ if (dxgalloc) -+ for (i = 0; i < alloc_count; i++) -+ if (dxgalloc[i]) -+ dxgallocation_free_handle(dxgalloc[i]); -+ if (resource && args->flags.create_resource) -+ dxgresource_free_handle(resource); -+ dxgdevice_release_alloc_list_lock(device); -+ -+ /* Destroy allocations in the host to unmap gpadls */ -+ ret1 = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, -+ msg.size); -+ if (ret1 < 0) -+ DXG_ERR("failed to destroy allocations: %x", -+ ret1); -+ -+ dxgdevice_acquire_alloc_list_lock(device); -+ if (dxgalloc) { -+ for (i = 0; i < alloc_count; i++) { -+ if (dxgalloc[i]) { -+ dxgalloc[i]->alloc_handle.v = 0; -+ dxgallocation_destroy(dxgalloc[i]); -+ dxgalloc[i] = NULL; -+ } -+ } -+ } -+ if (resource && args->flags.create_resource) { -+ /* -+ * Prevent the resource memory from freeing. -+ * It will be freed in the top level function. 
-+ */ -+ kref_get(&resource->resource_kref); -+ dxgresource_destroy(resource); -+ } -+ dxgdevice_release_alloc_list_lock(device); -+ } -+ -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_create_allocation(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct d3dkmt_createallocation *args, -+ struct d3dkmt_createallocation *__user -+ input_args, -+ struct dxgresource *resource, -+ struct dxgallocation **dxgalloc, -+ struct d3dddi_allocationinfo2 *alloc_info, -+ struct d3dkmt_createstandardallocation -+ *standard_alloc) -+{ -+ struct dxgkvmb_command_createallocation *command = NULL; -+ struct dxgkvmb_command_createallocation_return *result = NULL; -+ int ret = -EINVAL; -+ int i; -+ u32 result_size = 0; -+ u32 cmd_size = 0; -+ u32 destroy_buffer_size = 0; -+ u32 priv_drv_data_size; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ if (args->private_runtime_data_size >= DXG_MAX_VM_BUS_PACKET_SIZE || -+ args->priv_drv_data_size >= DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EOVERFLOW; -+ goto cleanup; -+ } -+ -+ /* -+ * Preallocate the buffer, which will be used for destruction in case -+ * of a failure -+ */ -+ destroy_buffer_size = sizeof(struct dxgkvmb_command_destroyallocation) + -+ args->alloc_count * sizeof(struct d3dkmthandle); -+ -+ /* Compute the total private driver size */ -+ -+ priv_drv_data_size = 0; -+ -+ for (i = 0; i < args->alloc_count; i++) { -+ if (alloc_info[i].priv_drv_data_size >= -+ DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EOVERFLOW; -+ goto cleanup; -+ } else { -+ priv_drv_data_size += alloc_info[i].priv_drv_data_size; -+ } -+ if (priv_drv_data_size >= DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EOVERFLOW; -+ goto cleanup; -+ } -+ } -+ -+ /* -+ * Private driver data for the result includes only per allocation -+ * private data -+ */ -+ result_size = sizeof(struct dxgkvmb_command_createallocation_return) + -+ (args->alloc_count - 1) * -+ sizeof(struct 
dxgkvmb_command_allocinfo_return) + -+ priv_drv_data_size; -+ result = vzalloc(result_size); -+ if (result == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ /* Private drv data for the command includes the global private data */ -+ priv_drv_data_size += args->priv_drv_data_size; -+ -+ cmd_size = sizeof(struct dxgkvmb_command_createallocation) + -+ args->alloc_count * -+ sizeof(struct dxgkvmb_command_createallocation_allocinfo) + -+ args->private_runtime_data_size + priv_drv_data_size; -+ if (cmd_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EOVERFLOW; -+ goto cleanup; -+ } -+ -+ DXG_TRACE("command size, driver_data_size %d %d %ld %ld", -+ cmd_size, priv_drv_data_size, -+ sizeof(struct dxgkvmb_command_createallocation), -+ sizeof(struct dxgkvmb_command_createallocation_allocinfo)); -+ -+ ret = init_message(&msg, device->adapter, process, -+ cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CREATEALLOCATION, -+ process->host_handle); -+ command->device = args->device; -+ command->flags = args->flags; -+ command->resource = args->resource; -+ command->private_runtime_resource_handle = -+ args->private_runtime_resource_handle; -+ command->alloc_count = args->alloc_count; -+ command->private_runtime_data_size = args->private_runtime_data_size; -+ command->priv_drv_data_size = args->priv_drv_data_size; -+ -+ ret = copy_private_data(args, command, alloc_info, standard_alloc); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ result, result_size); -+ if (ret < 0) { -+ DXG_ERR("send_create_allocation failed %x", ret); -+ goto cleanup; -+ } -+ -+ ret = create_local_allocations(process, device, args, input_args, -+ alloc_info, result, resource, dxgalloc, -+ destroy_buffer_size); -+cleanup: -+ -+ if (result) -+ vfree(result); -+ free_message(&msg, process); -+ -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int 
dxgvmb_send_destroy_allocation(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct d3dkmt_destroyallocation2 *args, -+ struct d3dkmthandle *alloc_handles) -+{ -+ struct dxgkvmb_command_destroyallocation *destroy_buffer; -+ u32 destroy_buffer_size; -+ int ret; -+ int allocations_size = args->alloc_count * sizeof(struct d3dkmthandle); -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ destroy_buffer_size = sizeof(struct dxgkvmb_command_destroyallocation) + -+ allocations_size; -+ -+ ret = init_message(&msg, device->adapter, process, -+ destroy_buffer_size); -+ if (ret) -+ goto cleanup; -+ destroy_buffer = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&destroy_buffer->hdr, -+ DXGK_VMBCOMMAND_DESTROYALLOCATION, -+ process->host_handle); -+ destroy_buffer->device = args->device; -+ destroy_buffer->resource = args->resource; -+ destroy_buffer->alloc_count = args->alloc_count; -+ destroy_buffer->flags = args->flags; -+ if (allocations_size) -+ memcpy(destroy_buffer->allocations, alloc_handles, -+ allocations_size); -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, -+ enum d3dkmdt_standardallocationtype alloctype, -+ struct d3dkmdt_gdisurfacedata *alloc_data, -+ u32 physical_adapter_index, -+ u32 *alloc_priv_driver_size, -+ void *priv_alloc_data, -+ u32 *res_priv_data_size, -+ void *priv_res_data) -+{ -+ struct dxgkvmb_command_getstandardallocprivdata *command; -+ struct dxgkvmb_command_getstandardallocprivdata_return *result = NULL; -+ u32 result_size = sizeof(*result); -+ int ret; -+ struct dxgvmbusmsgres msg = {.hdr = NULL}; -+ -+ if (priv_alloc_data) -+ result_size += *alloc_priv_driver_size; -+ if (priv_res_data) -+ result_size += *res_priv_data_size; -+ ret = init_message_res(&msg, device->adapter, device->process, -+ sizeof(*command), result_size); -+ 
if (ret) -+ goto cleanup; -+ command = msg.msg; -+ result = msg.res; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_DDIGETSTANDARDALLOCATIONDRIVERDATA, -+ device->process->host_handle); -+ -+ command->alloc_type = alloctype; -+ command->priv_driver_data_size = *alloc_priv_driver_size; -+ command->physical_adapter_index = physical_adapter_index; -+ command->priv_driver_resource_size = *res_priv_data_size; -+ switch (alloctype) { -+ case _D3DKMDT_STANDARDALLOCATION_GDISURFACE: -+ command->gdi_surface = *alloc_data; -+ break; -+ case _D3DKMDT_STANDARDALLOCATION_SHAREDPRIMARYSURFACE: -+ case _D3DKMDT_STANDARDALLOCATION_SHADOWSURFACE: -+ case _D3DKMDT_STANDARDALLOCATION_STAGINGSURFACE: -+ default: -+ DXG_ERR("Invalid standard alloc type"); -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ result, msg.res_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result->status); -+ if (ret < 0) -+ goto cleanup; -+ -+ if (*alloc_priv_driver_size && -+ result->priv_driver_data_size != *alloc_priv_driver_size) { -+ DXG_ERR("Priv data size mismatch"); -+ goto cleanup; -+ } -+ if (*res_priv_data_size && -+ result->priv_driver_resource_size != *res_priv_data_size) { -+ DXG_ERR("Resource priv data size mismatch"); -+ goto cleanup; -+ } -+ *alloc_priv_driver_size = result->priv_driver_data_size; -+ *res_priv_data_size = result->priv_driver_resource_size; -+ if (priv_alloc_data) { -+ memcpy(priv_alloc_data, &result[1], -+ result->priv_driver_data_size); -+ } -+ if (priv_res_data) { -+ memcpy(priv_res_data, -+ (char *)(&result[1]) + result->priv_driver_data_size, -+ result->priv_driver_resource_size); -+ } -+ -+cleanup: -+ -+ free_message((struct dxgvmbusmsg *)&msg, device->process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args) -diff --git 
a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -173,6 +173,14 @@ struct dxgkvmb_command_setiospaceregion { - u32 shared_page_gpadl; - }; - -+/* Returns ntstatus */ -+struct dxgkvmb_command_setexistingsysmemstore { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle allocation; -+ u32 gpadl; -+}; -+ - struct dxgkvmb_command_createprocess { - struct dxgkvmb_command_vm_to_host hdr; - void *process; -@@ -269,6 +277,121 @@ struct dxgkvmb_command_flushdevice { - enum dxgdevice_flushschedulerreason reason; - }; - -+struct dxgkvmb_command_createallocation_allocinfo { -+ u32 flags; -+ u32 priv_drv_data_size; -+ u32 vidpn_source_id; -+}; -+ -+struct dxgkvmb_command_createallocation { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+ u32 private_runtime_data_size; -+ u32 priv_drv_data_size; -+ u32 alloc_count; -+ struct d3dkmt_createallocationflags flags; -+ u64 private_runtime_resource_handle; -+ bool make_resident; -+/* dxgkvmb_command_createallocation_allocinfo alloc_info[alloc_count]; */ -+/* u8 private_rutime_data[private_runtime_data_size] */ -+/* u8 priv_drv_data[] for each alloc_info */ -+}; -+ -+struct dxgkvmb_command_getstandardallocprivdata { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ enum d3dkmdt_standardallocationtype alloc_type; -+ u32 priv_driver_data_size; -+ u32 priv_driver_resource_size; -+ u32 physical_adapter_index; -+ union { -+ struct d3dkmdt_sharedprimarysurfacedata primary; -+ struct d3dkmdt_shadowsurfacedata shadow; -+ struct d3dkmdt_stagingsurfacedata staging; -+ struct d3dkmdt_gdisurfacedata gdi_surface; -+ }; -+}; -+ -+struct dxgkvmb_command_getstandardallocprivdata_return { -+ struct ntstatus status; -+ u32 priv_driver_data_size; -+ u32 priv_driver_resource_size; -+ union { -+ struct 
d3dkmdt_sharedprimarysurfacedata primary; -+ struct d3dkmdt_shadowsurfacedata shadow; -+ struct d3dkmdt_stagingsurfacedata staging; -+ struct d3dkmdt_gdisurfacedata gdi_surface; -+ }; -+/* char alloc_priv_data[priv_driver_data_size]; */ -+/* char resource_priv_data[priv_driver_resource_size]; */ -+}; -+ -+struct dxgkarg_describeallocation { -+ u64 allocation; -+ u32 width; -+ u32 height; -+ u32 format; -+ u32 multisample_method; -+ struct d3dddi_rational refresh_rate; -+ u32 private_driver_attribute; -+ u32 flags; -+ u32 rotation; -+}; -+ -+struct dxgkvmb_allocflags { -+ union { -+ u32 flags; -+ struct { -+ u32 primary:1; -+ u32 cdd_primary:1; -+ u32 dod_primary:1; -+ u32 overlay:1; -+ u32 reserved6:1; -+ u32 capture:1; -+ u32 reserved0:4; -+ u32 reserved1:1; -+ u32 existing_sysmem:1; -+ u32 stereo:1; -+ u32 direct_flip:1; -+ u32 hardware_protected:1; -+ u32 reserved2:1; -+ u32 reserved3:1; -+ u32 reserved4:1; -+ u32 protected:1; -+ u32 cached:1; -+ u32 independent_primary:1; -+ u32 reserved:11; -+ }; -+ }; -+}; -+ -+struct dxgkvmb_command_allocinfo_return { -+ struct d3dkmthandle allocation; -+ u32 priv_drv_data_size; -+ struct dxgkvmb_allocflags allocation_flags; -+ u64 allocation_size; -+ struct dxgkarg_describeallocation driver_info; -+}; -+ -+struct dxgkvmb_command_createallocation_return { -+ struct d3dkmt_createallocationflags flags; -+ struct d3dkmthandle resource; -+ struct d3dkmthandle global_share; -+ u32 vgpu_flags; -+ struct dxgkvmb_command_allocinfo_return allocation_info[1]; -+ /* Private driver data for allocations */ -+}; -+ -+/* The command returns ntstatus */ -+struct dxgkvmb_command_destroyallocation { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+ u32 alloc_count; -+ struct d3dddicb_destroyallocation2flags flags; -+ struct d3dkmthandle allocations[1]; -+}; -+ - struct dxgkvmb_command_createcontextvirtual { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmthandle context; 
-diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -714,6 +714,633 @@ dxgkio_destroy_context(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+get_standard_alloc_priv_data(struct dxgdevice *device, -+ struct d3dkmt_createstandardallocation *alloc_info, -+ u32 *standard_alloc_priv_data_size, -+ void **standard_alloc_priv_data, -+ u32 *standard_res_priv_data_size, -+ void **standard_res_priv_data) -+{ -+ int ret; -+ struct d3dkmdt_gdisurfacedata gdi_data = { }; -+ u32 priv_data_size = 0; -+ u32 res_priv_data_size = 0; -+ void *priv_data = NULL; -+ void *res_priv_data = NULL; -+ -+ gdi_data.type = _D3DKMDT_GDISURFACE_TEXTURE_CROSSADAPTER; -+ gdi_data.width = alloc_info->existing_heap_data.size; -+ gdi_data.height = 1; -+ gdi_data.format = _D3DDDIFMT_UNKNOWN; -+ -+ *standard_alloc_priv_data_size = 0; -+ ret = dxgvmb_send_get_stdalloc_data(device, -+ _D3DKMDT_STANDARDALLOCATION_GDISURFACE, -+ &gdi_data, 0, -+ &priv_data_size, NULL, -+ &res_priv_data_size, -+ NULL); -+ if (ret < 0) -+ goto cleanup; -+ DXG_TRACE("Priv data size: %d", priv_data_size); -+ if (priv_data_size == 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ priv_data = vzalloc(priv_data_size); -+ if (priv_data == NULL) { -+ ret = -ENOMEM; -+ DXG_ERR("failed to allocate memory for priv data: %d", -+ priv_data_size); -+ goto cleanup; -+ } -+ if (res_priv_data_size) { -+ res_priv_data = vzalloc(res_priv_data_size); -+ if (res_priv_data == NULL) { -+ ret = -ENOMEM; -+ dev_err(DXGDEV, -+ "failed to alloc memory for res priv data: %d", -+ res_priv_data_size); -+ goto cleanup; -+ } -+ } -+ ret = dxgvmb_send_get_stdalloc_data(device, -+ _D3DKMDT_STANDARDALLOCATION_GDISURFACE, -+ &gdi_data, 0, -+ &priv_data_size, -+ priv_data, -+ &res_priv_data_size, -+ res_priv_data); -+ if (ret < 0) -+ goto cleanup; -+ *standard_alloc_priv_data_size = priv_data_size; 
-+ *standard_alloc_priv_data = priv_data; -+ *standard_res_priv_data_size = res_priv_data_size; -+ *standard_res_priv_data = res_priv_data; -+ priv_data = NULL; -+ res_priv_data = NULL; -+ -+cleanup: -+ if (priv_data) -+ vfree(priv_data); -+ if (res_priv_data) -+ vfree(res_priv_data); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+static int -+dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_createallocation args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ struct d3dddi_allocationinfo2 *alloc_info = NULL; -+ struct d3dkmt_createstandardallocation standard_alloc; -+ u32 alloc_info_size = 0; -+ struct dxgresource *resource = NULL; -+ struct dxgallocation **dxgalloc = NULL; -+ bool resource_mutex_acquired = false; -+ u32 standard_alloc_priv_data_size = 0; -+ void *standard_alloc_priv_data = NULL; -+ u32 res_priv_data_size = 0; -+ void *res_priv_data = NULL; -+ int i; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.alloc_count > D3DKMT_CREATEALLOCATION_MAX || -+ args.alloc_count == 0) { -+ DXG_ERR("invalid number of allocations to create"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ alloc_info_size = sizeof(struct d3dddi_allocationinfo2) * -+ args.alloc_count; -+ alloc_info = vzalloc(alloc_info_size); -+ if (alloc_info == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(alloc_info, args.allocation_info, -+ alloc_info_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc info"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ for (i = 0; i < args.alloc_count; i++) { -+ if (args.flags.standard_allocation) { -+ if (alloc_info[i].priv_drv_data_size != 0) { -+ DXG_ERR("private data size not zero"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ if (alloc_info[i].priv_drv_data_size >= -+ DXG_MAX_VM_BUS_PACKET_SIZE) { -+ 
DXG_ERR("private data size too big: %d %d %ld", -+ i, alloc_info[i].priv_drv_data_size, -+ sizeof(alloc_info[0])); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ if (args.flags.existing_section || args.flags.create_protected) { -+ DXG_ERR("invalid allocation flags"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.flags.standard_allocation) { -+ if (args.standard_allocation == NULL) { -+ DXG_ERR("invalid standard allocation"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_from_user(&standard_alloc, -+ args.standard_allocation, -+ sizeof(standard_alloc)); -+ if (ret) { -+ DXG_ERR("failed to copy std alloc data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (standard_alloc.type == -+ _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP) { -+ if (alloc_info[0].sysmem == NULL || -+ (unsigned long)alloc_info[0].sysmem & -+ (PAGE_SIZE - 1)) { -+ DXG_ERR("invalid sysmem pointer"); -+ ret = STATUS_INVALID_PARAMETER; -+ goto cleanup; -+ } -+ if (!args.flags.existing_sysmem) { -+ DXG_ERR("expect existing_sysmem flag"); -+ ret = STATUS_INVALID_PARAMETER; -+ goto cleanup; -+ } -+ } else if (standard_alloc.type == -+ _D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER) { -+ if (args.flags.existing_sysmem) { -+ DXG_ERR("existing_sysmem flag invalid"); -+ ret = STATUS_INVALID_PARAMETER; -+ goto cleanup; -+ -+ } -+ if (alloc_info[0].sysmem != NULL) { -+ DXG_ERR("sysmem should be NULL"); -+ ret = STATUS_INVALID_PARAMETER; -+ goto cleanup; -+ } -+ } else { -+ DXG_ERR("invalid standard allocation type"); -+ ret = STATUS_INVALID_PARAMETER; -+ goto cleanup; -+ } -+ -+ if (args.priv_drv_data_size != 0 || -+ args.alloc_count != 1 || -+ standard_alloc.existing_heap_data.size == 0 || -+ standard_alloc.existing_heap_data.size & (PAGE_SIZE - 1)) { -+ DXG_ERR("invalid standard allocation"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ args.priv_drv_data_size = -+ sizeof(struct d3dkmt_createstandardallocation); -+ } -+ -+ if (args.flags.create_shared && !args.flags.create_resource) { -+ 
DXG_ERR("create_resource must be set for create_shared"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ if (args.flags.standard_allocation) { -+ ret = get_standard_alloc_priv_data(device, -+ &standard_alloc, -+ &standard_alloc_priv_data_size, -+ &standard_alloc_priv_data, -+ &res_priv_data_size, -+ &res_priv_data); -+ if (ret < 0) -+ goto cleanup; -+ DXG_TRACE("Alloc private data: %d", -+ standard_alloc_priv_data_size); -+ } -+ -+ if (args.flags.create_resource) { -+ resource = dxgresource_create(device); -+ if (resource == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ resource->private_runtime_handle = -+ args.private_runtime_resource_handle; -+ } else { -+ if (args.resource.v) { -+ /* Adding new allocations to the given resource */ -+ -+ dxgprocess_ht_lock_shared_down(process); -+ resource = hmgrtable_get_object_by_type( -+ &process->handle_table, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ args.resource); -+ kref_get(&resource->resource_kref); -+ dxgprocess_ht_lock_shared_up(process); -+ -+ if (resource == NULL || resource->device != device) { -+ DXG_ERR("invalid resource handle %x", -+ args.resource.v); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ /* Synchronize with resource destruction */ -+ mutex_lock(&resource->resource_mutex); -+ if (!dxgresource_is_active(resource)) { -+ mutex_unlock(&resource->resource_mutex); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ resource_mutex_acquired = true; -+ } -+ } 
-+ -+ dxgalloc = vzalloc(sizeof(struct dxgallocation *) * args.alloc_count); -+ if (dxgalloc == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ for (i = 0; i < args.alloc_count; i++) { -+ struct dxgallocation *alloc; -+ u32 priv_data_size; -+ -+ if (args.flags.standard_allocation) -+ priv_data_size = standard_alloc_priv_data_size; -+ else -+ priv_data_size = alloc_info[i].priv_drv_data_size; -+ -+ if (alloc_info[i].sysmem && !args.flags.standard_allocation) { -+ if ((unsigned long) -+ alloc_info[i].sysmem & (PAGE_SIZE - 1)) { -+ DXG_ERR("invalid sysmem alloc %d, %p", -+ i, alloc_info[i].sysmem); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ if ((alloc_info[0].sysmem == NULL) != -+ (alloc_info[i].sysmem == NULL)) { -+ DXG_ERR("All allocs must have sysmem pointer"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ dxgalloc[i] = dxgallocation_create(process); -+ if (dxgalloc[i] == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ alloc = dxgalloc[i]; -+ -+ if (resource) { -+ ret = dxgresource_add_alloc(resource, alloc); -+ if (ret < 0) -+ goto cleanup; -+ } else { -+ dxgdevice_add_alloc(device, alloc); -+ } -+ if (args.flags.create_shared) { -+ /* Remember alloc private data to use it during open */ -+ alloc->priv_drv_data = vzalloc(priv_data_size + -+ offsetof(struct privdata, data)); -+ if (alloc->priv_drv_data == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ if (args.flags.standard_allocation) { -+ memcpy(alloc->priv_drv_data->data, -+ standard_alloc_priv_data, -+ priv_data_size); -+ } else { -+ ret = copy_from_user( -+ alloc->priv_drv_data->data, -+ alloc_info[i].priv_drv_data, -+ priv_data_size); -+ if (ret) { -+ dev_err(DXGDEV, -+ "failed to copy priv data"); -+ ret = -EFAULT; -+ goto cleanup; -+ } -+ } -+ alloc->priv_drv_data->data_size = priv_data_size; -+ } -+ } -+ -+ ret = dxgvmb_send_create_allocation(process, device, &args, inargs, -+ resource, dxgalloc, alloc_info, -+ &standard_alloc); -+cleanup: -+ -+ if (resource_mutex_acquired) { -+ 
mutex_unlock(&resource->resource_mutex); -+ kref_put(&resource->resource_kref, dxgresource_release); -+ } -+ if (ret < 0) { -+ if (dxgalloc) { -+ for (i = 0; i < args.alloc_count; i++) { -+ if (dxgalloc[i]) -+ dxgallocation_destroy(dxgalloc[i]); -+ } -+ } -+ if (resource && args.flags.create_resource) { -+ dxgresource_destroy(resource); -+ } -+ } -+ if (dxgalloc) -+ vfree(dxgalloc); -+ if (standard_alloc_priv_data) -+ vfree(standard_alloc_priv_data); -+ if (res_priv_data) -+ vfree(res_priv_data); -+ if (alloc_info) -+ vfree(alloc_info); -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) { -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int validate_alloc(struct dxgallocation *alloc0, -+ struct dxgallocation *alloc, -+ struct dxgdevice *device, -+ struct d3dkmthandle alloc_handle) -+{ -+ u32 fail_reason; -+ -+ if (alloc == NULL) { -+ fail_reason = 1; -+ goto cleanup; -+ } -+ if (alloc->resource_owner != alloc0->resource_owner) { -+ fail_reason = 2; -+ goto cleanup; -+ } -+ if (alloc->resource_owner) { -+ if (alloc->owner.resource != alloc0->owner.resource) { -+ fail_reason = 3; -+ goto cleanup; -+ } -+ if (alloc->owner.resource->device != device) { -+ fail_reason = 4; -+ goto cleanup; -+ } -+ } else { -+ if (alloc->owner.device != device) { -+ fail_reason = 6; -+ goto cleanup; -+ } -+ } -+ return 0; -+cleanup: -+ DXG_ERR("Alloc validation failed: reason: %d %x", -+ fail_reason, alloc_handle.v); -+ return -EINVAL; -+} -+ -+static int -+dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_destroyallocation2 args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int ret; -+ struct d3dkmthandle *alloc_handles = NULL; -+ struct dxgallocation **allocs = NULL; -+ struct dxgresource *resource = NULL; -+ int i; -+ -+ ret = 
copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.alloc_count > D3DKMT_CREATEALLOCATION_MAX || -+ ((args.alloc_count == 0) == (args.resource.v == 0))) { -+ DXG_ERR("invalid number of allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.alloc_count) { -+ u32 handle_size = sizeof(struct d3dkmthandle) * -+ args.alloc_count; -+ -+ alloc_handles = vzalloc(handle_size); -+ if (alloc_handles == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ allocs = vzalloc(sizeof(struct dxgallocation *) * -+ args.alloc_count); -+ if (allocs == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(alloc_handles, args.allocations, -+ handle_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* Acquire the device lock to synchronize with the device destriction */ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ /* -+ * Destroy the local allocation handles first. If the host handle -+ * is destroyed first, another object could be assigned to the process -+ * table at the same place as the allocation handle and it will fail. 
-+ */ -+ if (args.alloc_count) { -+ dxgprocess_ht_lock_exclusive_down(process); -+ for (i = 0; i < args.alloc_count; i++) { -+ allocs[i] = -+ hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ alloc_handles[i]); -+ ret = -+ validate_alloc(allocs[0], allocs[i], device, -+ alloc_handles[i]); -+ if (ret < 0) { -+ dxgprocess_ht_lock_exclusive_up(process); -+ goto cleanup; -+ } -+ } -+ dxgprocess_ht_lock_exclusive_up(process); -+ for (i = 0; i < args.alloc_count; i++) -+ dxgallocation_free_handle(allocs[i]); -+ } else { -+ struct dxgallocation *alloc; -+ -+ dxgprocess_ht_lock_exclusive_down(process); -+ resource = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ args.resource); -+ if (resource == NULL) { -+ DXG_ERR("Invalid resource handle: %x", -+ args.resource.v); -+ ret = -EINVAL; -+ } else if (resource->device != device) { -+ DXG_ERR("Resource belongs to wrong device: %x", -+ args.resource.v); -+ ret = -EINVAL; -+ } else { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ args.resource); -+ resource->object_state = DXGOBJECTSTATE_DESTROYED; -+ resource->handle.v = 0; -+ resource->handle_valid = 0; -+ } -+ dxgprocess_ht_lock_exclusive_up(process); -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ dxgdevice_acquire_alloc_list_lock_shared(device); -+ list_for_each_entry(alloc, &resource->alloc_list_head, -+ alloc_list_entry) { -+ dxgallocation_free_handle(alloc); -+ } -+ dxgdevice_release_alloc_list_lock_shared(device); -+ } -+ -+ if (args.alloc_count && allocs[0]->resource_owner) -+ resource = allocs[0]->owner.resource; -+ -+ if (resource) { -+ kref_get(&resource->resource_kref); -+ mutex_lock(&resource->resource_mutex); -+ } -+ -+ ret = dxgvmb_send_destroy_allocation(process, device, &args, -+ alloc_handles); -+ -+ /* -+ * Destroy the allocations after the host destroyed it. -+ * The allocation gpadl teardown will wait until the host unmaps its -+ * gpadl. 
-+ */ -+ dxgdevice_acquire_alloc_list_lock(device); -+ if (args.alloc_count) { -+ for (i = 0; i < args.alloc_count; i++) { -+ if (allocs[i]) { -+ allocs[i]->alloc_handle.v = 0; -+ dxgallocation_destroy(allocs[i]); -+ } -+ } -+ } else { -+ dxgresource_destroy(resource); -+ } -+ dxgdevice_release_alloc_list_lock(device); -+ -+ if (resource) { -+ mutex_unlock(&resource->resource_mutex); -+ kref_put(&resource->resource_kref, dxgresource_release); -+ } -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) { -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ -+ if (alloc_handles) -+ vfree(alloc_handles); -+ -+ if (allocs) -+ vfree(allocs); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, -@@ -721,7 +1348,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x03 */ {}, - /* 0x04 */ {dxgkio_create_context_virtual, LX_DXCREATECONTEXTVIRTUAL}, - /* 0x05 */ {dxgkio_destroy_context, LX_DXDESTROYCONTEXT}, --/* 0x06 */ {}, -+/* 0x06 */ {dxgkio_create_allocation, LX_DXCREATEALLOCATION}, - /* 0x07 */ {}, - /* 0x08 */ {}, - /* 0x09 */ {dxgkio_query_adapter_info, LX_DXQUERYADAPTERINFO}, -@@ -734,7 +1361,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x10 */ {}, - /* 0x11 */ {}, - /* 0x12 */ {}, --/* 0x13 */ {}, -+/* 0x13 */ {dxgkio_destroy_allocation, LX_DXDESTROYALLOCATION2}, - /* 0x14 */ {dxgkio_enum_adapters, LX_DXENUMADAPTERS2}, - /* 0x15 */ {dxgkio_close_adapter, LX_DXCLOSEADAPTER}, - /* 0x16 */ {}, -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -30,6 +30,9 @@ extern const struct d3dkmthandle zerohandle; - * plistmutex (process list mutex) - * table_lock (handle table lock) - * context_list_lock -+ * 
alloc_list_lock -+ * resource_mutex -+ * shared_resource_list_lock - * core_lock (dxgadapter lock) - * device_lock (dxgdevice lock) - * process_adapter_mutex -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -58,6 +58,7 @@ struct winluid { - __u32 b; - }; - -+#define D3DKMT_CREATEALLOCATION_MAX 1024 - #define D3DKMT_ADAPTERS_MAX 64 - - struct d3dkmt_adapterinfo { -@@ -197,6 +198,205 @@ struct d3dkmt_createcontextvirtual { - struct d3dkmthandle context; - }; - -+enum d3dkmdt_gdisurfacetype { -+ _D3DKMDT_GDISURFACE_INVALID = 0, -+ _D3DKMDT_GDISURFACE_TEXTURE = 1, -+ _D3DKMDT_GDISURFACE_STAGING_CPUVISIBLE = 2, -+ _D3DKMDT_GDISURFACE_STAGING = 3, -+ _D3DKMDT_GDISURFACE_LOOKUPTABLE = 4, -+ _D3DKMDT_GDISURFACE_EXISTINGSYSMEM = 5, -+ _D3DKMDT_GDISURFACE_TEXTURE_CPUVISIBLE = 6, -+ _D3DKMDT_GDISURFACE_TEXTURE_CROSSADAPTER = 7, -+ _D3DKMDT_GDISURFACE_TEXTURE_CPUVISIBLE_CROSSADAPTER = 8, -+}; -+ -+struct d3dddi_rational { -+ __u32 numerator; -+ __u32 denominator; -+}; -+ -+enum d3dddiformat { -+ _D3DDDIFMT_UNKNOWN = 0, -+}; -+ -+struct d3dkmdt_gdisurfacedata { -+ __u32 width; -+ __u32 height; -+ __u32 format; -+ enum d3dkmdt_gdisurfacetype type; -+ __u32 flags; -+ __u32 pitch; -+}; -+ -+struct d3dkmdt_stagingsurfacedata { -+ __u32 width; -+ __u32 height; -+ __u32 pitch; -+}; -+ -+struct d3dkmdt_sharedprimarysurfacedata { -+ __u32 width; -+ __u32 height; -+ enum d3dddiformat format; -+ struct d3dddi_rational refresh_rate; -+ __u32 vidpn_source_id; -+}; -+ -+struct d3dkmdt_shadowsurfacedata { -+ __u32 width; -+ __u32 height; -+ enum d3dddiformat format; -+ __u32 pitch; -+}; -+ -+enum d3dkmdt_standardallocationtype { -+ _D3DKMDT_STANDARDALLOCATION_SHAREDPRIMARYSURFACE = 1, -+ _D3DKMDT_STANDARDALLOCATION_SHADOWSURFACE = 2, -+ _D3DKMDT_STANDARDALLOCATION_STAGINGSURFACE = 3, -+ _D3DKMDT_STANDARDALLOCATION_GDISURFACE = 4, -+}; -+ -+enum 
d3dkmt_standardallocationtype { -+ _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP = 1, -+ _D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER = 2, -+}; -+ -+struct d3dkmt_standardallocation_existingheap { -+ __u64 size; -+}; -+ -+struct d3dkmt_createstandardallocationflags { -+ union { -+ struct { -+ __u32 reserved:32; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_createstandardallocation { -+ enum d3dkmt_standardallocationtype type; -+ __u32 reserved; -+ struct d3dkmt_standardallocation_existingheap existing_heap_data; -+ struct d3dkmt_createstandardallocationflags flags; -+ __u32 reserved1; -+}; -+ -+struct d3dddi_allocationinfo2 { -+ struct d3dkmthandle allocation; -+#ifdef __KERNEL__ -+ const void *sysmem; -+#else -+ __u64 sysmem; -+#endif -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ __u32 priv_drv_data_size; -+ __u32 vidpn_source_id; -+ union { -+ struct { -+ __u32 primary:1; -+ __u32 stereo:1; -+ __u32 override_priority:1; -+ __u32 reserved:29; -+ }; -+ __u32 value; -+ } flags; -+ __u64 gpu_virtual_address; -+ union { -+ __u32 priority; -+ __u64 unused; -+ }; -+ __u64 reserved[5]; -+}; -+ -+struct d3dkmt_createallocationflags { -+ union { -+ struct { -+ __u32 create_resource:1; -+ __u32 create_shared:1; -+ __u32 non_secure:1; -+ __u32 create_protected:1; -+ __u32 restrict_shared_access:1; -+ __u32 existing_sysmem:1; -+ __u32 nt_security_sharing:1; -+ __u32 read_only:1; -+ __u32 create_write_combined:1; -+ __u32 create_cached:1; -+ __u32 swap_chain_back_buffer:1; -+ __u32 cross_adapter:1; -+ __u32 open_cross_adapter:1; -+ __u32 partial_shared_creation:1; -+ __u32 zeroed:1; -+ __u32 write_watch:1; -+ __u32 standard_allocation:1; -+ __u32 existing_section:1; -+ __u32 reserved:14; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_createallocation { -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+ struct d3dkmthandle global_share; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ const void *private_runtime_data; 
-+#else -+ __u64 private_runtime_data; -+#endif -+ __u32 private_runtime_data_size; -+ __u32 reserved1; -+ union { -+#ifdef __KERNEL__ -+ struct d3dkmt_createstandardallocation *standard_allocation; -+ const void *priv_drv_data; -+#else -+ __u64 standard_allocation; -+ __u64 priv_drv_data; -+#endif -+ }; -+ __u32 priv_drv_data_size; -+ __u32 alloc_count; -+#ifdef __KERNEL__ -+ struct d3dddi_allocationinfo2 *allocation_info; -+#else -+ __u64 allocation_info; -+#endif -+ struct d3dkmt_createallocationflags flags; -+ __u32 reserved2; -+ __u64 private_runtime_resource_handle; -+}; -+ -+struct d3dddicb_destroyallocation2flags { -+ union { -+ struct { -+ __u32 assume_not_in_use:1; -+ __u32 synchronous_destroy:1; -+ __u32 reserved:29; -+ __u32 system_use_only:1; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_destroyallocation2 { -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+#ifdef __KERNEL__ -+ const struct d3dkmthandle *allocations; -+#else -+ __u64 allocations; -+#endif -+ __u32 alloc_count; -+ struct d3dddicb_destroyallocation2flags flags; -+}; -+ - struct d3dkmt_adaptertype { - union { - struct { -@@ -279,8 +479,12 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x04, struct d3dkmt_createcontextvirtual) - #define LX_DXDESTROYCONTEXT \ - _IOWR(0x47, 0x05, struct d3dkmt_destroycontext) -+#define LX_DXCREATEALLOCATION \ -+ _IOWR(0x47, 0x06, struct d3dkmt_createallocation) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) -+#define LX_DXDESTROYALLOCATION2 \ -+ _IOWR(0x47, 0x13, struct d3dkmt_destroyallocation2) - #define LX_DXENUMADAPTERS2 \ - _IOWR(0x47, 0x14, struct d3dkmt_enumadapters2) - #define LX_DXCLOSEADAPTER \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1676-drivers-hv-dxgkrnl-Creation-of-compute-device-sync-objects.patch b/patch/kernel/archive/wsl2-arm64-6.6/1676-drivers-hv-dxgkrnl-Creation-of-compute-device-sync-objects.patch deleted file mode 100644 index b53d55f96842..000000000000 
--- a/patch/kernel/archive/wsl2-arm64-6.6/1676-drivers-hv-dxgkrnl-Creation-of-compute-device-sync-objects.patch +++ /dev/null @@ -1,1016 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 1 Feb 2022 14:38:32 -0800 -Subject: drivers: hv: dxgkrnl: Creation of compute device sync objects - -Implement ioctls to create and destroy compute devicesync objects: - - the LX_DXCREATESYNCHRONIZATIONOBJECT ioctl, - - the LX_DXDESTROYSYNCHRONIZATIONOBJECT ioctl. - -Compute device synchronization objects are used to synchronize -execution of compute device commands, which are queued to -different execution contexts (dxgcontext objects). - -There are several types of sync objects (mutex, monitored -fence, CPU event, fence). A "signal" or a "wait" operation -could be queued to an execution context. - -Monitored fence sync objects are particular important. -A monitored fence object has a fence value, which could be -monitored by the compute device or by CPU. Therefore, a CPU -virtual address is allocated during object creation to allow -an application to read the fence value. dxg_map_iospace and -dxg_unmap_iospace implement creation of the CPU virtual address. 
-This is done as follow: -- The host allocates a portion of the guest IO space, which is mapped - to the actual fence value memory on the host -- The host returns the guest IO space address to the guest -- The guest allocates a CPU virtual address and updates page tables - to point to the IO space address - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 184 +++++++++ - drivers/hv/dxgkrnl/dxgkrnl.h | 80 ++++ - drivers/hv/dxgkrnl/dxgmodule.c | 1 + - drivers/hv/dxgkrnl/dxgprocess.c | 16 + - drivers/hv/dxgkrnl/dxgvmbus.c | 205 ++++++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 20 + - drivers/hv/dxgkrnl/ioctl.c | 130 +++++- - include/uapi/misc/d3dkmthk.h | 95 +++++ - 8 files changed, 729 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -160,6 +160,24 @@ void dxgadapter_remove_process(struct dxgprocess_adapter *process_info) - list_del(&process_info->adapter_process_list_entry); - } - -+void dxgadapter_add_syncobj(struct dxgadapter *adapter, -+ struct dxgsyncobject *object) -+{ -+ down_write(&adapter->shared_resource_list_lock); -+ list_add_tail(&object->syncobj_list_entry, &adapter->syncobj_list_head); -+ up_write(&adapter->shared_resource_list_lock); -+} -+ -+void dxgadapter_remove_syncobj(struct dxgsyncobject *object) -+{ -+ down_write(&object->adapter->shared_resource_list_lock); -+ if (object->syncobj_list_entry.next) { -+ list_del(&object->syncobj_list_entry); -+ object->syncobj_list_entry.next = NULL; -+ } -+ up_write(&object->adapter->shared_resource_list_lock); -+} -+ - int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter) - { - down_write(&adapter->core_lock); -@@ -213,6 +231,7 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - init_rwsem(&device->context_list_lock); - 
init_rwsem(&device->alloc_list_lock); - INIT_LIST_HEAD(&device->pqueue_list_head); -+ INIT_LIST_HEAD(&device->syncobj_list_head); - device->object_state = DXGOBJECTSTATE_CREATED; - device->execution_state = _D3DKMT_DEVICEEXECUTION_ACTIVE; - -@@ -228,6 +247,7 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - void dxgdevice_stop(struct dxgdevice *device) - { - struct dxgallocation *alloc; -+ struct dxgsyncobject *syncobj; - - DXG_TRACE("Destroying device: %p", device); - dxgdevice_acquire_alloc_list_lock(device); -@@ -235,6 +255,14 @@ void dxgdevice_stop(struct dxgdevice *device) - dxgallocation_stop(alloc); - } - dxgdevice_release_alloc_list_lock(device); -+ -+ hmgrtable_lock(&device->process->handle_table, DXGLOCK_EXCL); -+ list_for_each_entry(syncobj, &device->syncobj_list_head, -+ syncobj_list_entry) { -+ dxgsyncobject_stop(syncobj); -+ } -+ hmgrtable_unlock(&device->process->handle_table, DXGLOCK_EXCL); -+ DXG_TRACE("Device stopped: %p", device); - } - - void dxgdevice_mark_destroyed(struct dxgdevice *device) -@@ -263,6 +291,20 @@ void dxgdevice_destroy(struct dxgdevice *device) - - dxgdevice_acquire_alloc_list_lock(device); - -+ while (!list_empty(&device->syncobj_list_head)) { -+ struct dxgsyncobject *syncobj = -+ list_first_entry(&device->syncobj_list_head, -+ struct dxgsyncobject, -+ syncobj_list_entry); -+ list_del(&syncobj->syncobj_list_entry); -+ syncobj->syncobj_list_entry.next = NULL; -+ dxgdevice_release_alloc_list_lock(device); -+ -+ dxgsyncobject_destroy(process, syncobj); -+ -+ dxgdevice_acquire_alloc_list_lock(device); -+ } -+ - { - struct dxgallocation *alloc; - struct dxgallocation *tmp; -@@ -565,6 +607,30 @@ void dxgdevice_release(struct kref *refcount) - kfree(device); - } - -+void dxgdevice_add_syncobj(struct dxgdevice *device, -+ struct dxgsyncobject *syncobj) -+{ -+ dxgdevice_acquire_alloc_list_lock(device); -+ list_add_tail(&syncobj->syncobj_list_entry, &device->syncobj_list_head); -+ kref_get(&syncobj->syncobj_kref); -+ 
dxgdevice_release_alloc_list_lock(device); -+} -+ -+void dxgdevice_remove_syncobj(struct dxgsyncobject *entry) -+{ -+ struct dxgdevice *device = entry->device; -+ -+ dxgdevice_acquire_alloc_list_lock(device); -+ if (entry->syncobj_list_entry.next) { -+ list_del(&entry->syncobj_list_entry); -+ entry->syncobj_list_entry.next = NULL; -+ kref_put(&entry->syncobj_kref, dxgsyncobject_release); -+ } -+ dxgdevice_release_alloc_list_lock(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ entry->device = NULL; -+} -+ - struct dxgcontext *dxgcontext_create(struct dxgdevice *device) - { - struct dxgcontext *context; -@@ -812,3 +878,121 @@ void dxgprocess_adapter_remove_device(struct dxgdevice *device) - } - mutex_unlock(&device->adapter_info->device_list_mutex); - } -+ -+struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct dxgadapter *adapter, -+ enum -+ d3dddi_synchronizationobject_type -+ type, -+ struct -+ d3dddi_synchronizationobject_flags -+ flags) -+{ -+ struct dxgsyncobject *syncobj; -+ -+ syncobj = kzalloc(sizeof(*syncobj), GFP_KERNEL); -+ if (syncobj == NULL) -+ goto cleanup; -+ syncobj->type = type; -+ syncobj->process = process; -+ switch (type) { -+ case _D3DDDI_MONITORED_FENCE: -+ case _D3DDDI_PERIODIC_MONITORED_FENCE: -+ syncobj->monitored_fence = 1; -+ break; -+ default: -+ break; -+ } -+ if (flags.shared) { -+ syncobj->shared = 1; -+ if (!flags.nt_security_sharing) { -+ DXG_ERR("nt_security_sharing must be set"); -+ goto cleanup; -+ } -+ } -+ -+ kref_init(&syncobj->syncobj_kref); -+ -+ if (syncobj->monitored_fence) { -+ syncobj->device = device; -+ syncobj->device_handle = device->handle; -+ kref_get(&device->device_kref); -+ dxgdevice_add_syncobj(device, syncobj); -+ } else { -+ dxgadapter_add_syncobj(adapter, syncobj); -+ } -+ syncobj->adapter = adapter; -+ kref_get(&adapter->adapter_kref); -+ -+ DXG_TRACE("Syncobj created: %p", syncobj); -+ return syncobj; -+cleanup: -+ if (syncobj) -+ 
kfree(syncobj); -+ return NULL; -+} -+ -+void dxgsyncobject_destroy(struct dxgprocess *process, -+ struct dxgsyncobject *syncobj) -+{ -+ int destroyed; -+ -+ DXG_TRACE("Destroying syncobj: %p", syncobj); -+ -+ dxgsyncobject_stop(syncobj); -+ -+ destroyed = test_and_set_bit(0, &syncobj->flags); -+ if (!destroyed) { -+ DXG_TRACE("Deleting handle: %x", syncobj->handle.v); -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ if (syncobj->handle.v) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ syncobj->handle); -+ syncobj->handle.v = 0; -+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release); -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (syncobj->monitored_fence) -+ dxgdevice_remove_syncobj(syncobj); -+ else -+ dxgadapter_remove_syncobj(syncobj); -+ if (syncobj->adapter) { -+ kref_put(&syncobj->adapter->adapter_kref, -+ dxgadapter_release); -+ syncobj->adapter = NULL; -+ } -+ } -+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release); -+} -+ -+void dxgsyncobject_stop(struct dxgsyncobject *syncobj) -+{ -+ int stopped = test_and_set_bit(1, &syncobj->flags); -+ -+ if (!stopped) { -+ DXG_TRACE("Stopping syncobj"); -+ if (syncobj->monitored_fence) { -+ if (syncobj->mapped_address) { -+ int ret = -+ dxg_unmap_iospace(syncobj->mapped_address, -+ PAGE_SIZE); -+ -+ (void)ret; -+ DXG_TRACE("unmap fence %d %p", -+ ret, syncobj->mapped_address); -+ syncobj->mapped_address = NULL; -+ } -+ } -+ } -+} -+ -+void dxgsyncobject_release(struct kref *refcount) -+{ -+ struct dxgsyncobject *syncobj; -+ -+ syncobj = container_of(refcount, struct dxgsyncobject, syncobj_kref); -+ kfree(syncobj); -+} -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -38,6 +38,7 @@ struct dxgdevice; - struct dxgcontext; - struct dxgallocation; - struct dxgresource; -+struct dxgsyncobject; - - /* 
- * Driver private data. -@@ -100,6 +101,56 @@ int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev); - void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch); - void dxgvmbuschannel_receive(void *ctx); - -+/* -+ * This is GPU synchronization object, which is used to synchronize execution -+ * between GPU contextx/hardware queues or for tracking GPU execution progress. -+ * A dxgsyncobject is created when somebody creates a syncobject or opens a -+ * shared syncobject. -+ * A syncobject belongs to an adapter, unless it is a cross-adapter object. -+ * Cross adapter syncobjects are currently not implemented. -+ * -+ * D3DDDI_MONITORED_FENCE and D3DDDI_PERIODIC_MONITORED_FENCE are called -+ * "device" syncobject, because the belong to a device (dxgdevice). -+ * Device syncobjects are inserted to a list in dxgdevice. -+ * -+ */ -+struct dxgsyncobject { -+ struct kref syncobj_kref; -+ enum d3dddi_synchronizationobject_type type; -+ /* -+ * List entry in dxgdevice for device sync objects. -+ * List entry in dxgadapter for other objects -+ */ -+ struct list_head syncobj_list_entry; -+ /* Adapter, the syncobject belongs to. NULL for stopped sync obejcts. */ -+ struct dxgadapter *adapter; -+ /* -+ * Pointer to the device, which was used to create the object. -+ * This is NULL for non-device syncbjects -+ */ -+ struct dxgdevice *device; -+ struct dxgprocess *process; -+ /* CPU virtual address of the fence value for "device" syncobjects */ -+ void *mapped_address; -+ /* Handle in the process handle table */ -+ struct d3dkmthandle handle; -+ /* Cached handle of the device. Used to avoid device dereference. */ -+ struct d3dkmthandle device_handle; -+ union { -+ struct { -+ /* Must be the first bit */ -+ u32 destroyed:1; -+ /* Must be the second bit */ -+ u32 stopped:1; -+ /* device syncobject */ -+ u32 monitored_fence:1; -+ u32 shared:1; -+ u32 reserved:27; -+ }; -+ long flags; -+ }; -+}; -+ - /* - * The structure defines an offered vGPU vm bus channel. 
- */ -@@ -109,6 +160,20 @@ struct dxgvgpuchannel { - struct hv_device *hdev; - }; - -+struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct dxgadapter *adapter, -+ enum -+ d3dddi_synchronizationobject_type -+ type, -+ struct -+ d3dddi_synchronizationobject_flags -+ flags); -+void dxgsyncobject_destroy(struct dxgprocess *process, -+ struct dxgsyncobject *syncobj); -+void dxgsyncobject_stop(struct dxgsyncobject *syncobj); -+void dxgsyncobject_release(struct kref *refcount); -+ - struct dxgglobal { - struct dxgdriver *drvdata; - struct dxgvmbuschannel channel; -@@ -271,6 +336,8 @@ struct dxgadapter { - struct list_head adapter_list_entry; - /* The list of dxgprocess_adapter entries */ - struct list_head adapter_process_list_head; -+ /* List of all non-device dxgsyncobject objects */ -+ struct list_head syncobj_list_head; - /* This lock protects shared resource and syncobject lists */ - struct rw_semaphore shared_resource_list_lock; - struct pci_dev *pci_dev; -@@ -296,6 +363,9 @@ void dxgadapter_release_lock_shared(struct dxgadapter *adapter); - int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter); - void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter); - void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter); -+void dxgadapter_add_syncobj(struct dxgadapter *adapter, -+ struct dxgsyncobject *so); -+void dxgadapter_remove_syncobj(struct dxgsyncobject *so); - void dxgadapter_add_process(struct dxgadapter *adapter, - struct dxgprocess_adapter *process_info); - void dxgadapter_remove_process(struct dxgprocess_adapter *process_info); -@@ -325,6 +395,7 @@ struct dxgdevice { - struct list_head resource_list_head; - /* List of paging queues. Protected by process handle table lock. 
*/ - struct list_head pqueue_list_head; -+ struct list_head syncobj_list_head; - struct d3dkmthandle handle; - enum d3dkmt_deviceexecution_state execution_state; - u32 handle_valid; -@@ -345,6 +416,8 @@ void dxgdevice_remove_alloc_safe(struct dxgdevice *dev, - struct dxgallocation *a); - void dxgdevice_add_resource(struct dxgdevice *dev, struct dxgresource *res); - void dxgdevice_remove_resource(struct dxgdevice *dev, struct dxgresource *res); -+void dxgdevice_add_syncobj(struct dxgdevice *dev, struct dxgsyncobject *so); -+void dxgdevice_remove_syncobj(struct dxgsyncobject *so); - bool dxgdevice_is_active(struct dxgdevice *dev); - void dxgdevice_acquire_context_list_lock(struct dxgdevice *dev); - void dxgdevice_release_context_list_lock(struct dxgdevice *dev); -@@ -455,6 +528,7 @@ void dxgallocation_free_handle(struct dxgallocation *a); - long dxgk_compat_ioctl(struct file *f, unsigned int p1, unsigned long p2); - long dxgk_unlocked_ioctl(struct file *f, unsigned int p1, unsigned long p2); - -+int dxg_unmap_iospace(void *va, u32 size); - /* - * The convention is that VNBus instance id is a GUID, but the host sets - * the lower part of the value to the host adapter LUID. 
The function -@@ -514,6 +588,12 @@ int dxgvmb_send_create_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - int dxgvmb_send_destroy_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - struct d3dkmt_destroyallocation2 *args, - struct d3dkmthandle *alloc_handles); -+int dxgvmb_send_create_sync_object(struct dxgprocess *pr, -+ struct dxgadapter *adapter, -+ struct d3dkmt_createsynchronizationobject2 -+ *args, struct dxgsyncobject *so); -+int dxgvmb_send_destroy_sync_object(struct dxgprocess *pr, -+ struct d3dkmthandle h); - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -162,6 +162,7 @@ int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, - init_rwsem(&adapter->core_lock); - - INIT_LIST_HEAD(&adapter->adapter_process_list_head); -+ INIT_LIST_HEAD(&adapter->syncobj_list_head); - init_rwsem(&adapter->shared_resource_list_lock); - adapter->pci_dev = dev; - guid_to_luid(guid, &adapter->luid); -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -59,6 +59,7 @@ void dxgprocess_destroy(struct dxgprocess *process) - enum hmgrentry_type t; - struct d3dkmthandle h; - void *o; -+ struct dxgsyncobject *syncobj; - struct dxgprocess_adapter *entry; - struct dxgprocess_adapter *tmp; - -@@ -84,6 +85,21 @@ void dxgprocess_destroy(struct dxgprocess *process) - } - } - -+ i = 0; -+ while (hmgrtable_next_entry(&process->handle_table, &i, &t, &h, &o)) { -+ switch (t) { -+ case HMGRENTRY_TYPE_DXGSYNCOBJECT: -+ DXG_TRACE("Destroy syncobj: %p %d", o, i); -+ syncobj = o; -+ syncobj->handle.v = 0; -+ dxgsyncobject_destroy(process, 
syncobj); -+ break; -+ default: -+ DXG_ERR("invalid entry in handle table %d", t); -+ break; -+ } -+ } -+ - hmgrtable_destroy(&process->handle_table); - hmgrtable_destroy(&process->local_handle_table); - } -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -495,6 +495,88 @@ dxgvmb_send_sync_msg_ntstatus(struct dxgvmbuschannel *channel, - return ret; - } - -+static int check_iospace_address(unsigned long address, u32 size) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (address < dxgglobal->mmiospace_base || -+ size > dxgglobal->mmiospace_size || -+ address >= (dxgglobal->mmiospace_base + -+ dxgglobal->mmiospace_size - size)) { -+ DXG_ERR("invalid iospace address %lx", address); -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+int dxg_unmap_iospace(void *va, u32 size) -+{ -+ int ret = 0; -+ -+ DXG_TRACE("Unmapping io space: %p %x", va, size); -+ -+ /* -+ * When an app calls exit(), dxgkrnl is called to close the device -+ * with current->mm equal to NULL. 
-+ */ -+ if (current->mm) { -+ ret = vm_munmap((unsigned long)va, size); -+ if (ret) { -+ DXG_ERR("vm_munmap failed %d", ret); -+ return -ENOTRECOVERABLE; -+ } -+ } -+ return 0; -+} -+ -+static u8 *dxg_map_iospace(u64 iospace_address, u32 size, -+ unsigned long protection, bool cached) -+{ -+ struct vm_area_struct *vma; -+ unsigned long va; -+ int ret = 0; -+ -+ DXG_TRACE("Mapping io space: %llx %x %lx", -+ iospace_address, size, protection); -+ if (check_iospace_address(iospace_address, size) < 0) { -+ DXG_ERR("invalid address to map"); -+ return NULL; -+ } -+ -+ va = vm_mmap(NULL, 0, size, protection, MAP_SHARED | MAP_ANONYMOUS, 0); -+ if ((long)va <= 0) { -+ DXG_ERR("vm_mmap failed %lx %d", va, size); -+ return NULL; -+ } -+ -+ mmap_read_lock(current->mm); -+ vma = find_vma(current->mm, (unsigned long)va); -+ if (vma) { -+ pgprot_t prot = vma->vm_page_prot; -+ -+ if (!cached) -+ prot = pgprot_writecombine(prot); -+ DXG_TRACE("vma: %lx %lx %lx", -+ vma->vm_start, vma->vm_end, va); -+ vma->vm_pgoff = iospace_address >> PAGE_SHIFT; -+ ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, -+ size, prot); -+ if (ret) -+ DXG_ERR("io_remap_pfn_range failed: %d", ret); -+ } else { -+ DXG_ERR("failed to find vma: %p %lx", vma, va); -+ ret = -ENOMEM; -+ } -+ mmap_read_unlock(current->mm); -+ -+ if (ret) { -+ dxg_unmap_iospace((void *)va, size); -+ return NULL; -+ } -+ DXG_TRACE("Mapped VA: %lx", va); -+ return (u8 *) va; -+} -+ - /* - * Global messages to the host - */ -@@ -613,6 +695,39 @@ int dxgvmb_send_destroy_process(struct d3dkmthandle process) - return ret; - } - -+int dxgvmb_send_destroy_sync_object(struct dxgprocess *process, -+ struct d3dkmthandle sync_object) -+{ -+ struct dxgkvmb_command_destroysyncobject *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, NULL, process, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ 
goto cleanup; -+ -+ command_vm_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_DESTROYSYNCOBJECT, -+ process->host_handle); -+ command->sync_object = sync_object; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(dxgglobal_get_dxgvmbuschannel(), -+ msg.hdr, msg.size); -+ -+ dxgglobal_release_channel_lock(); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - /* - * Virtual GPU messages to the host - */ -@@ -1023,7 +1138,11 @@ int create_existing_sysmem(struct dxgdevice *device, - ret = -ENOMEM; - goto cleanup; - } -+#ifdef _MAIN_KERNEL_ - DXG_TRACE("New gpadl %d", dxgalloc->gpadl.gpadl_handle); -+#else -+ DXG_TRACE("New gpadl %d", dxgalloc->gpadl); -+#endif - - command_vgpu_to_host_init2(&set_store_command->hdr, - DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE, -@@ -1501,6 +1620,92 @@ int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - return ret; - } - -+static void set_result(struct d3dkmt_createsynchronizationobject2 *args, -+ u64 fence_gpu_va, u8 *va) -+{ -+ args->info.periodic_monitored_fence.fence_gpu_virtual_address = -+ fence_gpu_va; -+ args->info.periodic_monitored_fence.fence_cpu_virtual_address = va; -+} -+ -+int -+dxgvmb_send_create_sync_object(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_createsynchronizationobject2 *args, -+ struct dxgsyncobject *syncobj) -+{ -+ struct dxgkvmb_command_createsyncobject_return result = { }; -+ struct dxgkvmb_command_createsyncobject *command; -+ int ret; -+ u8 *va = 0; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CREATESYNCOBJECT, -+ process->host_handle); -+ command->args = *args; -+ command->client_hint = 1; /* CLIENTHINT_UMD */ -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result, -+ sizeof(result)); -+ if (ret < 0) { -+ 
DXG_ERR("failed %d", ret); -+ goto cleanup; -+ } -+ args->sync_object = result.sync_object; -+ if (syncobj->shared) { -+ if (result.global_sync_object.v == 0) { -+ DXG_ERR("shared handle is 0"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ args->info.shared_handle = result.global_sync_object; -+ } -+ -+ if (syncobj->monitored_fence) { -+ va = dxg_map_iospace(result.fence_storage_address, PAGE_SIZE, -+ PROT_READ | PROT_WRITE, true); -+ if (va == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ if (args->info.type == _D3DDDI_MONITORED_FENCE) { -+ args->info.monitored_fence.fence_gpu_virtual_address = -+ result.fence_gpu_va; -+ args->info.monitored_fence.fence_cpu_virtual_address = -+ va; -+ { -+ unsigned long value; -+ -+ DXG_TRACE("fence cpu va: %p", va); -+ ret = copy_from_user(&value, va, -+ sizeof(u64)); -+ if (ret) { -+ DXG_ERR("failed to read fence"); -+ ret = -EINVAL; -+ } else { -+ DXG_TRACE("fence value:%lx", -+ value); -+ } -+ } -+ } else { -+ set_result(args, result.fence_gpu_va, va); -+ } -+ syncobj->mapped_address = va; -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -410,4 +410,24 @@ struct dxgkvmb_command_destroycontext { - struct d3dkmthandle context; - }; - -+struct dxgkvmb_command_createsyncobject { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_createsynchronizationobject2 args; -+ u32 client_hint; -+}; -+ -+struct dxgkvmb_command_createsyncobject_return { -+ struct d3dkmthandle sync_object; -+ struct d3dkmthandle global_sync_object; -+ u64 fence_gpu_va; -+ u64 fence_storage_address; -+ u32 fence_storage_offset; -+}; -+ -+/* The command returns 
ntstatus */ -+struct dxgkvmb_command_destroysyncobject { -+ struct dxgkvmb_command_vm_to_host hdr; -+ struct d3dkmthandle sync_object; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -1341,6 +1341,132 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_createsynchronizationobject2 args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct dxgsyncobject *syncobj = NULL; -+ bool device_lock_acquired = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) -+ goto cleanup; -+ -+ device_lock_acquired = true; -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ syncobj = dxgsyncobject_create(process, device, adapter, args.info.type, -+ args.info.flags); -+ if (syncobj == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_create_sync_object(process, adapter, &args, syncobj); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, syncobj, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ args.sync_object); -+ if (ret >= 0) -+ syncobj->handle = args.sync_object; -+ 
hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (syncobj) { -+ dxgsyncobject_destroy(process, syncobj); -+ if (args.sync_object.v) -+ dxgvmb_send_destroy_sync_object(process, -+ args.sync_object); -+ } -+ } -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_destroy_sync_object(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_destroysynchronizationobject args; -+ struct dxgsyncobject *syncobj = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ DXG_TRACE("handle 0x%x", args.sync_object.v); -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ syncobj = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ args.sync_object); -+ if (syncobj) { -+ DXG_TRACE("syncobj 0x%p", syncobj); -+ syncobj->handle.v = 0; -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ args.sync_object); -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (syncobj == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ dxgsyncobject_destroy(process, syncobj); -+ -+ ret = dxgvmb_send_destroy_sync_object(process, args.sync_object); -+ -+cleanup: -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, -@@ -1358,7 +1484,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x0d */ {}, - /* 0x0e */ {}, - /* 0x0f */ {}, --/* 0x10 */ {}, -+/* 0x10 */ {dxgkio_create_sync_object, LX_DXCREATESYNCHRONIZATIONOBJECT}, - /* 0x11 */ {}, - /* 0x12 
*/ {}, - /* 0x13 */ {dxgkio_destroy_allocation, LX_DXDESTROYALLOCATION2}, -@@ -1371,7 +1497,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x1a */ {}, - /* 0x1b */ {}, - /* 0x1c */ {}, --/* 0x1d */ {}, -+/* 0x1d */ {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT}, - /* 0x1e */ {}, - /* 0x1f */ {}, - /* 0x20 */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -256,6 +256,97 @@ enum d3dkmdt_standardallocationtype { - _D3DKMDT_STANDARDALLOCATION_GDISURFACE = 4, - }; - -+struct d3dddi_synchronizationobject_flags { -+ union { -+ struct { -+ __u32 shared:1; -+ __u32 nt_security_sharing:1; -+ __u32 cross_adapter:1; -+ __u32 top_of_pipeline:1; -+ __u32 no_signal:1; -+ __u32 no_wait:1; -+ __u32 no_signal_max_value_on_tdr:1; -+ __u32 no_gpu_access:1; -+ __u32 reserved:23; -+ }; -+ __u32 value; -+ }; -+}; -+ -+enum d3dddi_synchronizationobject_type { -+ _D3DDDI_SYNCHRONIZATION_MUTEX = 1, -+ _D3DDDI_SEMAPHORE = 2, -+ _D3DDDI_FENCE = 3, -+ _D3DDDI_CPU_NOTIFICATION = 4, -+ _D3DDDI_MONITORED_FENCE = 5, -+ _D3DDDI_PERIODIC_MONITORED_FENCE = 6, -+ _D3DDDI_SYNCHRONIZATION_TYPE_LIMIT -+}; -+ -+struct d3dddi_synchronizationobjectinfo2 { -+ enum d3dddi_synchronizationobject_type type; -+ struct d3dddi_synchronizationobject_flags flags; -+ union { -+ struct { -+ __u32 initial_state; -+ } synchronization_mutex; -+ -+ struct { -+ __u32 max_count; -+ __u32 initial_count; -+ } semaphore; -+ -+ struct { -+ __u64 fence_value; -+ } fence; -+ -+ struct { -+ __u64 event; -+ } cpu_notification; -+ -+ struct { -+ __u64 initial_fence_value; -+#ifdef __KERNEL__ -+ void *fence_cpu_virtual_address; -+#else -+ __u64 *fence_cpu_virtual_address; -+#endif -+ __u64 fence_gpu_virtual_address; -+ __u32 engine_affinity; -+ } monitored_fence; -+ -+ struct { -+ struct d3dkmthandle adapter; -+ __u32 vidpn_target_id; -+ __u64 time; -+#ifdef __KERNEL__ -+ 
void *fence_cpu_virtual_address; -+#else -+ __u64 fence_cpu_virtual_address; -+#endif -+ __u64 fence_gpu_virtual_address; -+ __u32 engine_affinity; -+ } periodic_monitored_fence; -+ -+ struct { -+ __u64 reserved[8]; -+ } reserved; -+ }; -+ struct d3dkmthandle shared_handle; -+}; -+ -+struct d3dkmt_createsynchronizationobject2 { -+ struct d3dkmthandle device; -+ __u32 reserved; -+ struct d3dddi_synchronizationobjectinfo2 info; -+ struct d3dkmthandle sync_object; -+ __u32 reserved1; -+}; -+ -+struct d3dkmt_destroysynchronizationobject { -+ struct d3dkmthandle sync_object; -+}; -+ - enum d3dkmt_standardallocationtype { - _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP = 1, - _D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER = 2, -@@ -483,6 +574,8 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x06, struct d3dkmt_createallocation) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) -+#define LX_DXCREATESYNCHRONIZATIONOBJECT \ -+ _IOWR(0x47, 0x10, struct d3dkmt_createsynchronizationobject2) - #define LX_DXDESTROYALLOCATION2 \ - _IOWR(0x47, 0x13, struct d3dkmt_destroyallocation2) - #define LX_DXENUMADAPTERS2 \ -@@ -491,6 +584,8 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x15, struct d3dkmt_closeadapter) - #define LX_DXDESTROYDEVICE \ - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) -+#define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ -+ _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) - #define LX_DXENUMADAPTERS3 \ - _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) - --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1677-drivers-hv-dxgkrnl-Operations-using-sync-objects.patch b/patch/kernel/archive/wsl2-arm64-6.6/1677-drivers-hv-dxgkrnl-Operations-using-sync-objects.patch deleted file mode 100644 index 4ea7e161dae8..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1677-drivers-hv-dxgkrnl-Operations-using-sync-objects.patch +++ /dev/null @@ -1,1689 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 
2001 -From: Iouri Tarassov -Date: Tue, 1 Feb 2022 13:59:23 -0800 -Subject: drivers: hv: dxgkrnl: Operations using sync objects - -Implement ioctls to submit operations with compute device -sync objects: - - the LX_DXSIGNALSYNCHRONIZATIONOBJECT ioctl. - The ioctl is used to submit a signal to a sync object. - - the LX_DXWAITFORSYNCHRONIZATIONOBJECT ioctl. - The ioctl is used to submit a wait for a sync object - - the LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU ioctl - The ioctl is used to signal to a monitored fence sync object - from a CPU thread. - - the LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU ioctl. - The ioctl is used to submit a signal to a monitored fence - sync object.. - - the LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2 ioctl. - The ioctl is used to submit a signal to a monitored fence - sync object. - - the LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU ioctl. - The ioctl is used to submit a wait for a monitored fence - sync object. - -Compute device synchronization objects are used to synchronize -execution of DMA buffers between different execution contexts. -Operations with sync objects include "signal" and "wait". A wait -for a sync object is satisfied when the sync object is signaled. - -A signal operation could be submitted to a compute device context or -the sync object could be signaled by a CPU thread. - -To improve performance, submitting operations to the host is done -asynchronously when the host supports it. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 38 +- - drivers/hv/dxgkrnl/dxgkrnl.h | 62 + - drivers/hv/dxgkrnl/dxgmodule.c | 102 +- - drivers/hv/dxgkrnl/dxgvmbus.c | 219 ++- - drivers/hv/dxgkrnl/dxgvmbus.h | 48 + - drivers/hv/dxgkrnl/ioctl.c | 702 +++++++++- - drivers/hv/dxgkrnl/misc.h | 2 + - include/uapi/misc/d3dkmthk.h | 159 +++ - 8 files changed, 1311 insertions(+), 21 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -249,7 +249,7 @@ void dxgdevice_stop(struct dxgdevice *device) - struct dxgallocation *alloc; - struct dxgsyncobject *syncobj; - -- DXG_TRACE("Destroying device: %p", device); -+ DXG_TRACE("Stopping device: %p", device); - dxgdevice_acquire_alloc_list_lock(device); - list_for_each_entry(alloc, &device->alloc_list_head, alloc_list_entry) { - dxgallocation_stop(alloc); -@@ -743,15 +743,13 @@ void dxgallocation_destroy(struct dxgallocation *alloc) - } - #ifdef _MAIN_KERNEL_ - if (alloc->gpadl.gpadl_handle) { -- DXG_TRACE("Teardown gpadl %d", -- alloc->gpadl.gpadl_handle); -+ DXG_TRACE("Teardown gpadl %d", alloc->gpadl.gpadl_handle); - vmbus_teardown_gpadl(dxgglobal_get_vmbus(), &alloc->gpadl); - alloc->gpadl.gpadl_handle = 0; - } - else - if (alloc->gpadl) { -- DXG_TRACE("Teardown gpadl %d", -- alloc->gpadl); -+ DXG_TRACE("Teardown gpadl %d", alloc->gpadl); - vmbus_teardown_gpadl(dxgglobal_get_vmbus(), alloc->gpadl); - alloc->gpadl = 0; - } -@@ -901,6 +899,13 @@ struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, - case _D3DDDI_PERIODIC_MONITORED_FENCE: - syncobj->monitored_fence = 1; - break; -+ case _D3DDDI_CPU_NOTIFICATION: -+ syncobj->cpu_event = 1; -+ syncobj->host_event = kzalloc(sizeof(*syncobj->host_event), -+ GFP_KERNEL); -+ if (syncobj->host_event == NULL) -+ goto 
cleanup; -+ break; - default: - break; - } -@@ -928,6 +933,8 @@ struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, - DXG_TRACE("Syncobj created: %p", syncobj); - return syncobj; - cleanup: -+ if (syncobj->host_event) -+ kfree(syncobj->host_event); - if (syncobj) - kfree(syncobj); - return NULL; -@@ -937,6 +944,7 @@ void dxgsyncobject_destroy(struct dxgprocess *process, - struct dxgsyncobject *syncobj) - { - int destroyed; -+ struct dxghosteventcpu *host_event; - - DXG_TRACE("Destroying syncobj: %p", syncobj); - -@@ -955,6 +963,16 @@ void dxgsyncobject_destroy(struct dxgprocess *process, - } - hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); - -+ if (syncobj->cpu_event) { -+ host_event = syncobj->host_event; -+ if (host_event->cpu_event) { -+ eventfd_ctx_put(host_event->cpu_event); -+ if (host_event->hdr.event_id) -+ dxgglobal_remove_host_event( -+ &host_event->hdr); -+ host_event->cpu_event = NULL; -+ } -+ } - if (syncobj->monitored_fence) - dxgdevice_remove_syncobj(syncobj); - else -@@ -971,16 +989,14 @@ void dxgsyncobject_destroy(struct dxgprocess *process, - void dxgsyncobject_stop(struct dxgsyncobject *syncobj) - { - int stopped = test_and_set_bit(1, &syncobj->flags); -+ int ret; - - if (!stopped) { - DXG_TRACE("Stopping syncobj"); - if (syncobj->monitored_fence) { - if (syncobj->mapped_address) { -- int ret = -- dxg_unmap_iospace(syncobj->mapped_address, -- PAGE_SIZE); -- -- (void)ret; -+ ret = dxg_unmap_iospace(syncobj->mapped_address, -+ PAGE_SIZE); - DXG_TRACE("unmap fence %d %p", - ret, syncobj->mapped_address); - syncobj->mapped_address = NULL; -@@ -994,5 +1010,7 @@ void dxgsyncobject_release(struct kref *refcount) - struct dxgsyncobject *syncobj; - - syncobj = container_of(refcount, struct dxgsyncobject, syncobj_kref); -+ if (syncobj->host_event) -+ kfree(syncobj->host_event); - kfree(syncobj); - } -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- 
a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -101,6 +101,29 @@ int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev); - void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch); - void dxgvmbuschannel_receive(void *ctx); - -+/* -+ * The structure describes an event, which will be signaled by -+ * a message from host. -+ */ -+enum dxghosteventtype { -+ dxghostevent_cpu_event = 1, -+}; -+ -+struct dxghostevent { -+ struct list_head host_event_list_entry; -+ u64 event_id; -+ enum dxghosteventtype event_type; -+}; -+ -+struct dxghosteventcpu { -+ struct dxghostevent hdr; -+ struct dxgprocess *process; -+ struct eventfd_ctx *cpu_event; -+ struct completion *completion_event; -+ bool destroy_after_signal; -+ bool remove_from_list; -+}; -+ - /* - * This is GPU synchronization object, which is used to synchronize execution - * between GPU contextx/hardware queues or for tracking GPU execution progress. -@@ -130,6 +153,8 @@ struct dxgsyncobject { - */ - struct dxgdevice *device; - struct dxgprocess *process; -+ /* Used by D3DDDI_CPU_NOTIFICATION objects */ -+ struct dxghosteventcpu *host_event; - /* CPU virtual address of the fence value for "device" syncobjects */ - void *mapped_address; - /* Handle in the process handle table */ -@@ -144,6 +169,7 @@ struct dxgsyncobject { - u32 stopped:1; - /* device syncobject */ - u32 monitored_fence:1; -+ u32 cpu_event:1; - u32 shared:1; - u32 reserved:27; - }; -@@ -206,6 +232,11 @@ struct dxgglobal { - /* protects the dxgprocess_adapter lists */ - struct mutex process_adapter_mutex; - -+ /* list of events, waiting to be signaled by the host */ -+ struct list_head host_event_list_head; -+ spinlock_t host_event_list_mutex; -+ atomic64_t host_event_id; -+ - bool global_channel_initialized; - bool async_msg_enabled; - bool misc_registered; -@@ -228,6 +259,11 @@ struct vmbus_channel *dxgglobal_get_vmbus(void); - struct dxgvmbuschannel *dxgglobal_get_dxgvmbuschannel(void); - void 
dxgglobal_acquire_process_adapter_lock(void); - void dxgglobal_release_process_adapter_lock(void); -+void dxgglobal_add_host_event(struct dxghostevent *hostevent); -+void dxgglobal_remove_host_event(struct dxghostevent *hostevent); -+u64 dxgglobal_new_host_event_id(void); -+void dxgglobal_signal_host_event(u64 event_id); -+struct dxghostevent *dxgglobal_get_host_event(u64 event_id); - int dxgglobal_acquire_channel_lock(void); - void dxgglobal_release_channel_lock(void); - -@@ -594,6 +630,31 @@ int dxgvmb_send_create_sync_object(struct dxgprocess *pr, - *args, struct dxgsyncobject *so); - int dxgvmb_send_destroy_sync_object(struct dxgprocess *pr, - struct d3dkmthandle h); -+int dxgvmb_send_signal_sync_object(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dddicb_signalflags flags, -+ u64 legacy_fence_value, -+ struct d3dkmthandle context, -+ u32 object_count, -+ struct d3dkmthandle *object, -+ u32 context_count, -+ struct d3dkmthandle *contexts, -+ u32 fence_count, u64 *fences, -+ struct eventfd_ctx *cpu_event, -+ struct d3dkmthandle device); -+int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle context, -+ u32 object_count, -+ struct d3dkmthandle *objects, -+ u64 *fences, -+ bool legacy_fence); -+int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct -+ d3dkmt_waitforsynchronizationobjectfromcpu -+ *args, -+ u64 cpu_event); - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -@@ -609,6 +670,7 @@ int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel, - void *command, - u32 cmd_size); - -+void signal_host_cpu_event(struct dxghostevent *eventhdr); - int ntstatus2int(struct ntstatus status); - - #ifdef DEBUG -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- 
a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -123,6 +123,102 @@ static struct dxgadapter *find_adapter(struct winluid *luid) - return adapter; - } - -+void dxgglobal_add_host_event(struct dxghostevent *event) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ spin_lock_irq(&dxgglobal->host_event_list_mutex); -+ list_add_tail(&event->host_event_list_entry, -+ &dxgglobal->host_event_list_head); -+ spin_unlock_irq(&dxgglobal->host_event_list_mutex); -+} -+ -+void dxgglobal_remove_host_event(struct dxghostevent *event) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ spin_lock_irq(&dxgglobal->host_event_list_mutex); -+ if (event->host_event_list_entry.next != NULL) { -+ list_del(&event->host_event_list_entry); -+ event->host_event_list_entry.next = NULL; -+ } -+ spin_unlock_irq(&dxgglobal->host_event_list_mutex); -+} -+ -+void signal_host_cpu_event(struct dxghostevent *eventhdr) -+{ -+ struct dxghosteventcpu *event = (struct dxghosteventcpu *)eventhdr; -+ -+ if (event->remove_from_list || -+ event->destroy_after_signal) { -+ list_del(&eventhdr->host_event_list_entry); -+ eventhdr->host_event_list_entry.next = NULL; -+ } -+ if (event->cpu_event) { -+ DXG_TRACE("signal cpu event"); -+ eventfd_signal(event->cpu_event, 1); -+ if (event->destroy_after_signal) -+ eventfd_ctx_put(event->cpu_event); -+ } else { -+ DXG_TRACE("signal completion"); -+ complete(event->completion_event); -+ } -+ if (event->destroy_after_signal) { -+ DXG_TRACE("destroying event %p", event); -+ kfree(event); -+ } -+} -+ -+void dxgglobal_signal_host_event(u64 event_id) -+{ -+ struct dxghostevent *event; -+ unsigned long flags; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ DXG_TRACE("Signaling host event %lld", event_id); -+ -+ spin_lock_irqsave(&dxgglobal->host_event_list_mutex, flags); -+ list_for_each_entry(event, &dxgglobal->host_event_list_head, -+ host_event_list_entry) { -+ if (event->event_id == event_id) { -+ DXG_TRACE("found event to signal"); -+ if 
(event->event_type == dxghostevent_cpu_event) -+ signal_host_cpu_event(event); -+ else -+ DXG_ERR("Unknown host event type"); -+ break; -+ } -+ } -+ spin_unlock_irqrestore(&dxgglobal->host_event_list_mutex, flags); -+} -+ -+struct dxghostevent *dxgglobal_get_host_event(u64 event_id) -+{ -+ struct dxghostevent *entry; -+ struct dxghostevent *event = NULL; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ spin_lock_irq(&dxgglobal->host_event_list_mutex); -+ list_for_each_entry(entry, &dxgglobal->host_event_list_head, -+ host_event_list_entry) { -+ if (entry->event_id == event_id) { -+ list_del(&entry->host_event_list_entry); -+ entry->host_event_list_entry.next = NULL; -+ event = entry; -+ break; -+ } -+ } -+ spin_unlock_irq(&dxgglobal->host_event_list_mutex); -+ return event; -+} -+ -+u64 dxgglobal_new_host_event_id(void) -+{ -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ return atomic64_inc_return(&dxgglobal->host_event_id); -+} -+ - void dxgglobal_acquire_process_adapter_lock(void) - { - struct dxgglobal *dxgglobal = dxggbl(); -@@ -720,12 +816,16 @@ static struct dxgglobal *dxgglobal_create(void) - INIT_LIST_HEAD(&dxgglobal->vgpu_ch_list_head); - INIT_LIST_HEAD(&dxgglobal->adapter_list_head); - init_rwsem(&dxgglobal->adapter_list_lock); -- - init_rwsem(&dxgglobal->channel_lock); - -+ INIT_LIST_HEAD(&dxgglobal->host_event_list_head); -+ spin_lock_init(&dxgglobal->host_event_list_mutex); -+ atomic64_set(&dxgglobal->host_event_id, 1); -+ - #ifdef DEBUG - dxgk_validate_ioctls(); - #endif -+ - return dxgglobal; - } - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -281,6 +281,22 @@ static void command_vm_to_host_init1(struct dxgkvmb_command_vm_to_host *command, - command->channel_type = DXGKVMB_VM_TO_HOST; - } - -+static void signal_guest_event(struct dxgkvmb_command_host_to_vm *packet, -+ u32 packet_length) -+{ -+ struct 
dxgkvmb_command_signalguestevent *command = (void *)packet; -+ -+ if (packet_length < sizeof(struct dxgkvmb_command_signalguestevent)) { -+ DXG_ERR("invalid signal guest event packet size"); -+ return; -+ } -+ if (command->event == 0) { -+ DXG_ERR("invalid event pointer"); -+ return; -+ } -+ dxgglobal_signal_host_event(command->event); -+} -+ - static void process_inband_packet(struct dxgvmbuschannel *channel, - struct vmpacket_descriptor *desc) - { -@@ -297,6 +313,7 @@ static void process_inband_packet(struct dxgvmbuschannel *channel, - switch (packet->command_type) { - case DXGK_VMBCOMMAND_SIGNALGUESTEVENT: - case DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE: -+ signal_guest_event(packet, packet_length); - break; - case DXGK_VMBCOMMAND_SENDWNFNOTIFICATION: - break; -@@ -959,7 +976,7 @@ dxgvmb_send_create_context(struct dxgadapter *adapter, - command->priv_drv_data, - args->priv_drv_data_size); - if (ret) { -- dev_err(DXGDEV, -+ DXG_ERR( - "Faled to copy private data to user"); - ret = -EINVAL; - dxgvmb_send_destroy_context(adapter, process, -@@ -1706,6 +1723,206 @@ dxgvmb_send_create_sync_object(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_signal_sync_object(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dddicb_signalflags flags, -+ u64 legacy_fence_value, -+ struct d3dkmthandle context, -+ u32 object_count, -+ struct d3dkmthandle __user *objects, -+ u32 context_count, -+ struct d3dkmthandle __user *contexts, -+ u32 fence_count, -+ u64 __user *fences, -+ struct eventfd_ctx *cpu_event_handle, -+ struct d3dkmthandle device) -+{ -+ int ret; -+ struct dxgkvmb_command_signalsyncobject *command; -+ u32 object_size = object_count * sizeof(struct d3dkmthandle); -+ u32 context_size = context_count * sizeof(struct d3dkmthandle); -+ u32 fence_size = fences ? 
fence_count * sizeof(u64) : 0; -+ u8 *current_pos; -+ u32 cmd_size = sizeof(struct dxgkvmb_command_signalsyncobject) + -+ object_size + context_size + fence_size; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (context.v) -+ cmd_size += sizeof(struct d3dkmthandle); -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_SIGNALSYNCOBJECT, -+ process->host_handle); -+ -+ if (flags.enqueue_cpu_event) -+ command->cpu_event_handle = (u64) cpu_event_handle; -+ else -+ command->device = device; -+ command->flags = flags; -+ command->fence_value = legacy_fence_value; -+ command->object_count = object_count; -+ command->context_count = context_count; -+ current_pos = (u8 *) &command[1]; -+ ret = copy_from_user(current_pos, objects, object_size); -+ if (ret) { -+ DXG_ERR("Failed to read objects %p %d", -+ objects, object_size); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ current_pos += object_size; -+ if (context.v) { -+ command->context_count++; -+ *(struct d3dkmthandle *) current_pos = context; -+ current_pos += sizeof(struct d3dkmthandle); -+ } -+ if (context_size) { -+ ret = copy_from_user(current_pos, contexts, context_size); -+ if (ret) { -+ DXG_ERR("Failed to read contexts %p %d", -+ contexts, context_size); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ current_pos += context_size; -+ } -+ if (fence_size) { -+ ret = copy_from_user(current_pos, fences, fence_size); -+ if (ret) { -+ DXG_ERR("Failed to read fences %p %d", -+ fences, fence_size); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ if (dxgglobal->async_msg_enabled) { -+ command->hdr.async_msg = 1; -+ ret = dxgvmb_send_async_msg(msg.channel, msg.hdr, msg.size); -+ } else { -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, -+ msg.size); -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); 
-+ return ret; -+} -+ -+int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct -+ d3dkmt_waitforsynchronizationobjectfromcpu -+ *args, -+ u64 cpu_event) -+{ -+ int ret = -EINVAL; -+ struct dxgkvmb_command_waitforsyncobjectfromcpu *command; -+ u32 object_size = args->object_count * sizeof(struct d3dkmthandle); -+ u32 fence_size = args->object_count * sizeof(u64); -+ u8 *current_pos; -+ u32 cmd_size = sizeof(*command) + object_size + fence_size; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMCPU, -+ process->host_handle); -+ command->device = args->device; -+ command->flags = args->flags; -+ command->object_count = args->object_count; -+ command->guest_event_pointer = (u64) cpu_event; -+ current_pos = (u8 *) &command[1]; -+ -+ ret = copy_from_user(current_pos, args->objects, object_size); -+ if (ret) { -+ DXG_ERR("failed to copy objects"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ current_pos += object_size; -+ ret = copy_from_user(current_pos, args->fence_values, -+ fence_size); -+ if (ret) { -+ DXG_ERR("failed to copy fences"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle context, -+ u32 object_count, -+ struct d3dkmthandle *objects, -+ u64 *fences, -+ bool legacy_fence) -+{ -+ int ret; -+ struct dxgkvmb_command_waitforsyncobjectfromgpu *command; -+ u32 fence_size = object_count * sizeof(u64); -+ u32 object_size = object_count * sizeof(struct d3dkmthandle); -+ u8 *current_pos; -+ u32 cmd_size = object_size + fence_size 
- sizeof(u64) + -+ sizeof(struct dxgkvmb_command_waitforsyncobjectfromgpu); -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ if (object_count == 0 || object_count > D3DDDI_MAX_OBJECT_WAITED_ON) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_WAITFORSYNCOBJECTFROMGPU, -+ process->host_handle); -+ command->context = context; -+ command->object_count = object_count; -+ command->legacy_fence_object = legacy_fence; -+ current_pos = (u8 *) command->fence_values; -+ memcpy(current_pos, fences, fence_size); -+ current_pos += fence_size; -+ memcpy(current_pos, objects, object_size); -+ -+ if (dxgglobal->async_msg_enabled) { -+ command->hdr.async_msg = 1; -+ ret = dxgvmb_send_async_msg(msg.channel, msg.hdr, msg.size); -+ } else { -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, -+ msg.size); -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -165,6 +165,13 @@ struct dxgkvmb_command_host_to_vm { - enum dxgkvmb_commandtype_host_to_vm command_type; - }; - -+struct dxgkvmb_command_signalguestevent { -+ struct dxgkvmb_command_host_to_vm hdr; -+ u64 event; -+ u64 process_id; -+ bool dereference_event; -+}; -+ - /* Returns ntstatus */ - struct dxgkvmb_command_setiospaceregion { - struct dxgkvmb_command_vm_to_host hdr; -@@ -430,4 +437,45 @@ struct dxgkvmb_command_destroysyncobject { - struct d3dkmthandle sync_object; - }; - -+/* The command returns ntstatus */ -+struct 
dxgkvmb_command_signalsyncobject { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ u32 object_count; -+ struct d3dddicb_signalflags flags; -+ u32 context_count; -+ u64 fence_value; -+ union { -+ /* Pointer to the guest event object */ -+ u64 cpu_event_handle; -+ /* Non zero when signal from CPU is done */ -+ struct d3dkmthandle device; -+ }; -+ /* struct d3dkmthandle ObjectHandleArray[object_count] */ -+ /* struct d3dkmthandle ContextArray[context_count] */ -+ /* u64 MonitoredFenceValueArray[object_count] */ -+}; -+ -+/* The command returns ntstatus */ -+struct dxgkvmb_command_waitforsyncobjectfromcpu { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ u32 object_count; -+ struct d3dddi_waitforsynchronizationobjectfromcpu_flags flags; -+ u64 guest_event_pointer; -+ bool dereference_event; -+ /* struct d3dkmthandle ObjectHandleArray[object_count] */ -+ /* u64 FenceValueArray [object_count] */ -+}; -+ -+/* The command returns ntstatus */ -+struct dxgkvmb_command_waitforsyncobjectfromgpu { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle context; -+ /* Must be 1 when bLegacyFenceObject is TRUE */ -+ u32 object_count; -+ bool legacy_fence_object; -+ u64 fence_values[1]; -+ /* struct d3dkmthandle ObjectHandles[object_count] */ -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -759,7 +759,7 @@ get_standard_alloc_priv_data(struct dxgdevice *device, - res_priv_data = vzalloc(res_priv_data_size); - if (res_priv_data == NULL) { - ret = -ENOMEM; -- dev_err(DXGDEV, -+ DXG_ERR( - "failed to alloc memory for res priv data: %d", - res_priv_data_size); - goto cleanup; -@@ -1065,7 +1065,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - alloc_info[i].priv_drv_data, - priv_data_size); - if (ret) { -- dev_err(DXGDEV, -+ DXG_ERR( - "failed to 
copy priv data"); - ret = -EFAULT; - goto cleanup; -@@ -1348,8 +1348,10 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - struct d3dkmt_createsynchronizationobject2 args; - struct dxgdevice *device = NULL; - struct dxgadapter *adapter = NULL; -+ struct eventfd_ctx *event = NULL; - struct dxgsyncobject *syncobj = NULL; - bool device_lock_acquired = false; -+ struct dxghosteventcpu *host_event = NULL; - - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { -@@ -1384,6 +1386,27 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - goto cleanup; - } - -+ if (args.info.type == _D3DDDI_CPU_NOTIFICATION) { -+ event = eventfd_ctx_fdget((int) -+ args.info.cpu_notification.event); -+ if (IS_ERR(event)) { -+ DXG_ERR("failed to reference the event"); -+ event = NULL; -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ host_event = syncobj->host_event; -+ host_event->hdr.event_id = dxgglobal_new_host_event_id(); -+ host_event->cpu_event = event; -+ host_event->remove_from_list = false; -+ host_event->destroy_after_signal = false; -+ host_event->hdr.event_type = dxghostevent_cpu_event; -+ dxgglobal_add_host_event(&host_event->hdr); -+ args.info.cpu_notification.event = host_event->hdr.event_id; -+ DXG_TRACE("creating CPU notification event: %lld", -+ args.info.cpu_notification.event); -+ } -+ - ret = dxgvmb_send_create_sync_object(process, adapter, &args, syncobj); - if (ret < 0) - goto cleanup; -@@ -1411,7 +1434,10 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - if (args.sync_object.v) - dxgvmb_send_destroy_sync_object(process, - args.sync_object); -+ event = NULL; - } -+ if (event) -+ eventfd_ctx_put(event); - } - if (adapter) - dxgadapter_release_lock_shared(adapter); -@@ -1467,6 +1493,659 @@ dxgkio_destroy_sync_object(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_signal_sync_object(struct dxgprocess *process, void *__user inargs) -+{ -+ 
struct d3dkmt_signalsynchronizationobject2 args; -+ struct d3dkmt_signalsynchronizationobject2 *__user in_args = inargs; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int ret; -+ u32 fence_count = 1; -+ struct eventfd_ctx *event = NULL; -+ struct dxghosteventcpu *host_event = NULL; -+ bool host_event_added = false; -+ u64 host_event_id = 0; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.context_count >= D3DDDI_MAX_BROADCAST_CONTEXT || -+ args.object_count > D3DDDI_MAX_OBJECT_SIGNALED) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.flags.enqueue_cpu_event) { -+ host_event = kzalloc(sizeof(*host_event), GFP_KERNEL); -+ if (host_event == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ host_event->process = process; -+ event = eventfd_ctx_fdget((int)args.cpu_event_handle); -+ if (IS_ERR(event)) { -+ DXG_ERR("failed to reference the event"); -+ event = NULL; -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ fence_count = 0; -+ host_event->cpu_event = event; -+ host_event_id = dxgglobal_new_host_event_id(); -+ host_event->hdr.event_type = dxghostevent_cpu_event; -+ host_event->hdr.event_id = host_event_id; -+ host_event->remove_from_list = true; -+ host_event->destroy_after_signal = true; -+ dxgglobal_add_host_event(&host_event->hdr); -+ host_event_added = true; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_signal_sync_object(process, adapter, -+ args.flags, args.fence.fence_value, -+ args.context, args.object_count, -+ in_args->object_array, -+ args.context_count, -+ in_args->contexts, fence_count, -+ NULL, (void 
*)host_event_id, -+ zerohandle); -+ -+ /* -+ * When the send operation succeeds, the host event will be destroyed -+ * after signal from the host -+ */ -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (host_event_added) { -+ /* The event might be signaled and destroyed by host */ -+ host_event = (struct dxghosteventcpu *) -+ dxgglobal_get_host_event(host_event_id); -+ if (host_event) { -+ eventfd_ctx_put(event); -+ event = NULL; -+ kfree(host_event); -+ host_event = NULL; -+ } -+ } -+ if (event) -+ eventfd_ctx_put(event); -+ if (host_event) -+ kfree(host_event); -+ } -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_signal_sync_object_cpu(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_signalsynchronizationobjectfromcpu args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (args.object_count == 0 || -+ args.object_count > D3DDDI_MAX_OBJECT_SIGNALED) { -+ DXG_TRACE("Too many syncobjects : %d", args.object_count); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_signal_sync_object(process, adapter, -+ args.flags, 0, zerohandle, -+ args.object_count, args.objects, 0, -+ NULL, args.object_count, -+ args.fence_values, NULL, -+ args.device); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s 
%d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_signal_sync_object_gpu(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_signalsynchronizationobjectfromgpu args; -+ struct d3dkmt_signalsynchronizationobjectfromgpu *__user user_args = -+ inargs; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dddicb_signalflags flags = { }; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.object_count == 0 || -+ args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_signal_sync_object(process, adapter, -+ flags, 0, zerohandle, -+ args.object_count, -+ args.objects, 1, -+ &user_args->context, -+ args.object_count, -+ args.monitored_fence_values, NULL, -+ zerohandle); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_signal_sync_object_gpu2(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_signalsynchronizationobjectfromgpu2 args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dkmthandle context_handle; -+ struct eventfd_ctx *event = NULL; -+ u64 *fences = NULL; -+ u32 fence_count = 0; -+ int ret; -+ struct dxghosteventcpu *host_event = NULL; -+ bool host_event_added = false; -+ u64 host_event_id = 0; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if 
(ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.flags.enqueue_cpu_event) { -+ if (args.object_count != 0 || args.cpu_event_handle == 0) { -+ DXG_ERR("Bad input in EnqueueCpuEvent: %d %lld", -+ args.object_count, args.cpu_event_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } else if (args.object_count == 0 || -+ args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE || -+ args.context_count == 0 || -+ args.context_count > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("Invalid input: %d %d", -+ args.object_count, args.context_count); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = copy_from_user(&context_handle, args.contexts, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy context handle"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.flags.enqueue_cpu_event) { -+ host_event = kzalloc(sizeof(*host_event), GFP_KERNEL); -+ if (host_event == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ host_event->process = process; -+ event = eventfd_ctx_fdget((int)args.cpu_event_handle); -+ if (IS_ERR(event)) { -+ DXG_ERR("failed to reference the event"); -+ event = NULL; -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ fence_count = 0; -+ host_event->cpu_event = event; -+ host_event_id = dxgglobal_new_host_event_id(); -+ host_event->hdr.event_id = host_event_id; -+ host_event->hdr.event_type = dxghostevent_cpu_event; -+ host_event->remove_from_list = true; -+ host_event->destroy_after_signal = true; -+ dxgglobal_add_host_event(&host_event->hdr); -+ host_event_added = true; -+ } else { -+ fences = args.monitored_fence_values; -+ fence_count = args.object_count; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ context_handle); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = 
dxgvmb_send_signal_sync_object(process, adapter, -+ args.flags, 0, zerohandle, -+ args.object_count, args.objects, -+ args.context_count, args.contexts, -+ fence_count, fences, -+ (void *)host_event_id, zerohandle); -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (host_event_added) { -+ /* The event might be signaled and destroyed by host */ -+ host_event = (struct dxghosteventcpu *) -+ dxgglobal_get_host_event(host_event_id); -+ if (host_event) { -+ eventfd_ctx_put(event); -+ event = NULL; -+ kfree(host_event); -+ host_event = NULL; -+ } -+ } -+ if (event) -+ eventfd_ctx_put(event); -+ if (host_event) -+ kfree(host_event); -+ } -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_wait_sync_object(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_waitforsynchronizationobject2 args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.object_count > D3DDDI_MAX_OBJECT_WAITED_ON || -+ args.object_count == 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ DXG_TRACE("Fence value: %lld", args.fence.fence_value); -+ ret = dxgvmb_send_wait_sync_object_gpu(process, adapter, -+ args.context, args.object_count, -+ args.object_array, -+ &args.fence.fence_value, true); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ 
kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_wait_sync_object_cpu(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_waitforsynchronizationobjectfromcpu args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct eventfd_ctx *event = NULL; -+ struct dxghosteventcpu host_event = { }; -+ struct dxghosteventcpu *async_host_event = NULL; -+ struct completion local_event = { }; -+ u64 event_id = 0; -+ int ret; -+ bool host_event_added = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE || -+ args.object_count == 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.async_event) { -+ async_host_event = kzalloc(sizeof(*async_host_event), -+ GFP_KERNEL); -+ if (async_host_event == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ async_host_event->process = process; -+ event = eventfd_ctx_fdget((int)args.async_event); -+ if (IS_ERR(event)) { -+ DXG_ERR("failed to reference the event"); -+ event = NULL; -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ async_host_event->cpu_event = event; -+ async_host_event->hdr.event_id = dxgglobal_new_host_event_id(); -+ async_host_event->destroy_after_signal = true; -+ async_host_event->hdr.event_type = dxghostevent_cpu_event; -+ dxgglobal_add_host_event(&async_host_event->hdr); -+ event_id = async_host_event->hdr.event_id; -+ host_event_added = true; -+ } else { -+ init_completion(&local_event); -+ host_event.completion_event = &local_event; -+ host_event.hdr.event_id = dxgglobal_new_host_event_id(); -+ host_event.hdr.event_type = dxghostevent_cpu_event; -+ dxgglobal_add_host_event(&host_event.hdr); -+ event_id = host_event.hdr.event_id; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == 
NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_wait_sync_object_cpu(process, adapter, -+ &args, event_id); -+ if (ret < 0) -+ goto cleanup; -+ -+ if (args.async_event == 0) { -+ dxgadapter_release_lock_shared(adapter); -+ adapter = NULL; -+ ret = wait_for_completion_interruptible(&local_event); -+ if (ret) { -+ DXG_ERR("wait_completion_interruptible: %d", -+ ret); -+ ret = -ERESTARTSYS; -+ } -+ } -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ if (host_event.hdr.event_id) -+ dxgglobal_remove_host_event(&host_event.hdr); -+ if (ret < 0) { -+ if (host_event_added) { -+ async_host_event = (struct dxghosteventcpu *) -+ dxgglobal_get_host_event(event_id); -+ if (async_host_event) { -+ if (async_host_event->hdr.event_type == -+ dxghostevent_cpu_event) { -+ eventfd_ctx_put(event); -+ event = NULL; -+ kfree(async_host_event); -+ async_host_event = NULL; -+ } else { -+ DXG_ERR("Invalid event type"); -+ DXGKRNL_ASSERT(0); -+ } -+ } -+ } -+ if (event) -+ eventfd_ctx_put(event); -+ if (async_host_event) -+ kfree(async_host_event); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_waitforsynchronizationobjectfromgpu args; -+ struct dxgcontext *context = NULL; -+ struct d3dkmthandle device_handle = {}; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct dxgsyncobject *syncobj = NULL; -+ struct d3dkmthandle *objects = NULL; -+ u32 object_size; -+ u64 *fences = NULL; -+ int ret; -+ enum hmgrentry_type syncobj_type = HMGRENTRY_TYPE_FREE; -+ bool monitored_fence = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ 
DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.object_count > DXG_MAX_VM_BUS_PACKET_SIZE || -+ args.object_count == 0) { -+ DXG_ERR("Invalid object count: %d", args.object_count); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ object_size = sizeof(struct d3dkmthandle) * args.object_count; -+ objects = vzalloc(object_size); -+ if (objects == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(objects, args.objects, object_size); -+ if (ret) { -+ DXG_ERR("failed to copy objects"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -+ context = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (context) { -+ device_handle = context->device_handle; -+ syncobj_type = -+ hmgrtable_get_object_type(&process->handle_table, -+ objects[0]); -+ } -+ if (device_handle.v == 0) { -+ DXG_ERR("Invalid context handle: %x", args.context.v); -+ ret = -EINVAL; -+ } else { -+ if (syncobj_type == HMGRENTRY_TYPE_MONITOREDFENCE) { -+ monitored_fence = true; -+ } else if (syncobj_type == HMGRENTRY_TYPE_DXGSYNCOBJECT) { -+ syncobj = -+ hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ objects[0]); -+ if (syncobj == NULL) { -+ DXG_ERR("Invalid syncobj: %x", -+ objects[0].v); -+ ret = -EINVAL; -+ } else { -+ monitored_fence = syncobj->monitored_fence; -+ } -+ } else { -+ DXG_ERR("Invalid syncobj type: %x", -+ objects[0].v); -+ ret = -EINVAL; -+ } -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED); -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ if (monitored_fence) { -+ object_size = sizeof(u64) * args.object_count; -+ fences = vzalloc(object_size); -+ if (fences == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(fences, args.monitored_fence_values, -+ object_size); -+ if (ret) { -+ DXG_ERR("failed to copy fences"); -+ ret = -EINVAL; -+ goto cleanup; -+ 
} -+ } else { -+ fences = &args.fence_value; -+ } -+ -+ device = dxgprocess_device_by_handle(process, device_handle); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_wait_sync_object_gpu(process, adapter, -+ args.context, args.object_count, -+ objects, fences, -+ !monitored_fence); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ if (objects) -+ vfree(objects); -+ if (fences && fences != &args.fence_value) -+ vfree(fences); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, -@@ -1485,8 +2164,8 @@ static struct ioctl_desc ioctls[] = { - /* 0x0e */ {}, - /* 0x0f */ {}, - /* 0x10 */ {dxgkio_create_sync_object, LX_DXCREATESYNCHRONIZATIONOBJECT}, --/* 0x11 */ {}, --/* 0x12 */ {}, -+/* 0x11 */ {dxgkio_signal_sync_object, LX_DXSIGNALSYNCHRONIZATIONOBJECT}, -+/* 0x12 */ {dxgkio_wait_sync_object, LX_DXWAITFORSYNCHRONIZATIONOBJECT}, - /* 0x13 */ {dxgkio_destroy_allocation, LX_DXDESTROYALLOCATION2}, - /* 0x14 */ {dxgkio_enum_adapters, LX_DXENUMADAPTERS2}, - /* 0x15 */ {dxgkio_close_adapter, LX_DXCLOSEADAPTER}, -@@ -1517,17 +2196,22 @@ static struct ioctl_desc ioctls[] = { - /* 0x2e */ {}, - /* 0x2f */ {}, - /* 0x30 */ {}, --/* 0x31 */ {}, --/* 0x32 */ {}, --/* 0x33 */ {}, -+/* 0x31 */ {dxgkio_signal_sync_object_cpu, -+ LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU}, -+/* 0x32 */ {dxgkio_signal_sync_object_gpu, -+ LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU}, -+/* 0x33 */ {dxgkio_signal_sync_object_gpu2, -+ LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2}, - /* 0x34 */ {}, - /* 0x35 */ {}, - /* 0x36 */ {}, - /* 0x37 */ {}, - /* 0x38 */ {}, - /* 0x39 */ {}, --/* 0x3a 
*/ {}, --/* 0x3b */ {}, -+/* 0x3a */ {dxgkio_wait_sync_object_cpu, -+ LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU}, -+/* 0x3b */ {dxgkio_wait_sync_object_gpu, -+ LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU}, - /* 0x3c */ {}, - /* 0x3d */ {}, - /* 0x3e */ {dxgkio_enum_adapters3, LX_DXENUMADAPTERS3}, -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -25,6 +25,8 @@ extern const struct d3dkmthandle zerohandle; - * The locks here are in the order from lowest to highest. - * When a lower lock is held, the higher lock should not be acquired. - * -+ * device_list_mutex -+ * host_event_list_mutex - * channel_lock (VMBus channel lock) - * fd_mutex - * plistmutex (process list mutex) -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -60,6 +60,9 @@ struct winluid { - - #define D3DKMT_CREATEALLOCATION_MAX 1024 - #define D3DKMT_ADAPTERS_MAX 64 -+#define D3DDDI_MAX_BROADCAST_CONTEXT 64 -+#define D3DDDI_MAX_OBJECT_WAITED_ON 32 -+#define D3DDDI_MAX_OBJECT_SIGNALED 32 - - struct d3dkmt_adapterinfo { - struct d3dkmthandle adapter_handle; -@@ -343,6 +346,148 @@ struct d3dkmt_createsynchronizationobject2 { - __u32 reserved1; - }; - -+struct d3dkmt_waitforsynchronizationobject2 { -+ struct d3dkmthandle context; -+ __u32 object_count; -+ struct d3dkmthandle object_array[D3DDDI_MAX_OBJECT_WAITED_ON]; -+ union { -+ struct { -+ __u64 fence_value; -+ } fence; -+ __u64 reserved[8]; -+ }; -+}; -+ -+struct d3dddicb_signalflags { -+ union { -+ struct { -+ __u32 signal_at_submission:1; -+ __u32 enqueue_cpu_event:1; -+ __u32 allow_fence_rewind:1; -+ __u32 reserved:28; -+ __u32 DXGK_SIGNAL_FLAG_INTERNAL0:1; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_signalsynchronizationobject2 { -+ struct d3dkmthandle context; -+ __u32 
object_count; -+ struct d3dkmthandle object_array[D3DDDI_MAX_OBJECT_SIGNALED]; -+ struct d3dddicb_signalflags flags; -+ __u32 context_count; -+ struct d3dkmthandle contexts[D3DDDI_MAX_BROADCAST_CONTEXT]; -+ union { -+ struct { -+ __u64 fence_value; -+ } fence; -+ __u64 cpu_event_handle; -+ __u64 reserved[8]; -+ }; -+}; -+ -+struct d3dddi_waitforsynchronizationobjectfromcpu_flags { -+ union { -+ struct { -+ __u32 wait_any:1; -+ __u32 reserved:31; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_waitforsynchronizationobjectfromcpu { -+ struct d3dkmthandle device; -+ __u32 object_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+ __u64 *fence_values; -+#else -+ __u64 objects; -+ __u64 fence_values; -+#endif -+ __u64 async_event; -+ struct d3dddi_waitforsynchronizationobjectfromcpu_flags flags; -+}; -+ -+struct d3dkmt_signalsynchronizationobjectfromcpu { -+ struct d3dkmthandle device; -+ __u32 object_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+ __u64 *fence_values; -+#else -+ __u64 objects; -+ __u64 fence_values; -+#endif -+ struct d3dddicb_signalflags flags; -+}; -+ -+struct d3dkmt_waitforsynchronizationobjectfromgpu { -+ struct d3dkmthandle context; -+ __u32 object_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+#else -+ __u64 objects; -+#endif -+ union { -+#ifdef __KERNEL__ -+ __u64 *monitored_fence_values; -+#else -+ __u64 monitored_fence_values; -+#endif -+ __u64 fence_value; -+ __u64 reserved[8]; -+ }; -+}; -+ -+struct d3dkmt_signalsynchronizationobjectfromgpu { -+ struct d3dkmthandle context; -+ __u32 object_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+#else -+ __u64 objects; -+#endif -+ union { -+#ifdef __KERNEL__ -+ __u64 *monitored_fence_values; -+#else -+ __u64 monitored_fence_values; -+#endif -+ __u64 reserved[8]; -+ }; -+}; -+ -+struct d3dkmt_signalsynchronizationobjectfromgpu2 { -+ __u32 object_count; -+ __u32 reserved1; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+#else -+ __u64 
objects; -+#endif -+ struct d3dddicb_signalflags flags; -+ __u32 context_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *contexts; -+#else -+ __u64 contexts; -+#endif -+ union { -+ __u64 fence_value; -+ __u64 cpu_event_handle; -+#ifdef __KERNEL__ -+ __u64 *monitored_fence_values; -+#else -+ __u64 monitored_fence_values; -+#endif -+ __u64 reserved[8]; -+ }; -+}; -+ - struct d3dkmt_destroysynchronizationobject { - struct d3dkmthandle sync_object; - }; -@@ -576,6 +721,10 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) - #define LX_DXCREATESYNCHRONIZATIONOBJECT \ - _IOWR(0x47, 0x10, struct d3dkmt_createsynchronizationobject2) -+#define LX_DXSIGNALSYNCHRONIZATIONOBJECT \ -+ _IOWR(0x47, 0x11, struct d3dkmt_signalsynchronizationobject2) -+#define LX_DXWAITFORSYNCHRONIZATIONOBJECT \ -+ _IOWR(0x47, 0x12, struct d3dkmt_waitforsynchronizationobject2) - #define LX_DXDESTROYALLOCATION2 \ - _IOWR(0x47, 0x13, struct d3dkmt_destroyallocation2) - #define LX_DXENUMADAPTERS2 \ -@@ -586,6 +735,16 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ - _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) -+#define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU \ -+ _IOWR(0x47, 0x31, struct d3dkmt_signalsynchronizationobjectfromcpu) -+#define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU \ -+ _IOWR(0x47, 0x32, struct d3dkmt_signalsynchronizationobjectfromgpu) -+#define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2 \ -+ _IOWR(0x47, 0x33, struct d3dkmt_signalsynchronizationobjectfromgpu2) -+#define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \ -+ _IOWR(0x47, 0x3a, struct d3dkmt_waitforsynchronizationobjectfromcpu) -+#define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \ -+ _IOWR(0x47, 0x3b, struct d3dkmt_waitforsynchronizationobjectfromgpu) - #define LX_DXENUMADAPTERS3 \ - _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) - --- -Armbian - diff --git 
a/patch/kernel/archive/wsl2-arm64-6.6/1678-drivers-hv-dxgkrnl-Sharing-of-dxgresource-objects.patch b/patch/kernel/archive/wsl2-arm64-6.6/1678-drivers-hv-dxgkrnl-Sharing-of-dxgresource-objects.patch deleted file mode 100644 index e777f4880d2e..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1678-drivers-hv-dxgkrnl-Sharing-of-dxgresource-objects.patch +++ /dev/null @@ -1,1464 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Mon, 31 Jan 2022 17:52:31 -0800 -Subject: drivers: hv: dxgkrnl: Sharing of dxgresource objects - -Implement creation of shared resources and ioctls for sharing -dxgresource objects between processes in the virtual machine. - -A dxgresource object is a collection of dxgallocation objects. -The driver API allows addition/removal of allocations to a resource, -but has limitations on addition/removal of allocations to a shared -resource. When a resource is "sealed", addition/removal of allocations -is not allowed. - -Resources are shared using file descriptor (FD) handles. The name -"NT handle" is used to be compatible with Windows implementation. - -An FD handle is created by the LX_DXSHAREOBJECTS ioctl. The given FD -handle could be sent to another process using any Linux API. - -To use a shared resource object in other ioctls the object needs to be -opened using its FD handle. An resource object is opened by the -LX_DXOPENRESOURCEFROMNTHANDLE ioctl. This ioctl returns a d3dkmthandle -value, which can be used to reference the resource object. - -The LX_DXQUERYRESOURCEINFOFROMNTHANDLE ioctl is used to query private -driver data of a shared resource object. This private data needs to be -used to actually open the object using the LX_DXOPENRESOURCEFROMNTHANDLE -ioctl. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 81 + - drivers/hv/dxgkrnl/dxgkrnl.h | 77 + - drivers/hv/dxgkrnl/dxgmodule.c | 1 + - drivers/hv/dxgkrnl/dxgvmbus.c | 127 ++ - drivers/hv/dxgkrnl/dxgvmbus.h | 30 + - drivers/hv/dxgkrnl/ioctl.c | 792 +++++++++- - include/uapi/misc/d3dkmthk.h | 96 ++ - 7 files changed, 1200 insertions(+), 4 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -160,6 +160,17 @@ void dxgadapter_remove_process(struct dxgprocess_adapter *process_info) - list_del(&process_info->adapter_process_list_entry); - } - -+void dxgadapter_remove_shared_resource(struct dxgadapter *adapter, -+ struct dxgsharedresource *object) -+{ -+ down_write(&adapter->shared_resource_list_lock); -+ if (object->shared_resource_list_entry.next) { -+ list_del(&object->shared_resource_list_entry); -+ object->shared_resource_list_entry.next = NULL; -+ } -+ up_write(&adapter->shared_resource_list_lock); -+} -+ - void dxgadapter_add_syncobj(struct dxgadapter *adapter, - struct dxgsyncobject *object) - { -@@ -489,6 +500,69 @@ void dxgdevice_remove_resource(struct dxgdevice *device, - } - } - -+struct dxgsharedresource *dxgsharedresource_create(struct dxgadapter *adapter) -+{ -+ struct dxgsharedresource *resource; -+ -+ resource = kzalloc(sizeof(*resource), GFP_KERNEL); -+ if (resource) { -+ INIT_LIST_HEAD(&resource->resource_list_head); -+ kref_init(&resource->sresource_kref); -+ mutex_init(&resource->fd_mutex); -+ resource->adapter = adapter; -+ } -+ return resource; -+} -+ -+void dxgsharedresource_destroy(struct kref *refcount) -+{ -+ struct dxgsharedresource *resource; -+ -+ resource = container_of(refcount, struct dxgsharedresource, -+ sresource_kref); -+ if (resource->runtime_private_data) -+ 
vfree(resource->runtime_private_data); -+ if (resource->resource_private_data) -+ vfree(resource->resource_private_data); -+ if (resource->alloc_private_data_sizes) -+ vfree(resource->alloc_private_data_sizes); -+ if (resource->alloc_private_data) -+ vfree(resource->alloc_private_data); -+ kfree(resource); -+} -+ -+void dxgsharedresource_add_resource(struct dxgsharedresource *shared_resource, -+ struct dxgresource *resource) -+{ -+ down_write(&shared_resource->adapter->shared_resource_list_lock); -+ DXG_TRACE("Adding resource: %p %p", shared_resource, resource); -+ list_add_tail(&resource->shared_resource_list_entry, -+ &shared_resource->resource_list_head); -+ kref_get(&shared_resource->sresource_kref); -+ kref_get(&resource->resource_kref); -+ resource->shared_owner = shared_resource; -+ up_write(&shared_resource->adapter->shared_resource_list_lock); -+} -+ -+void dxgsharedresource_remove_resource(struct dxgsharedresource -+ *shared_resource, -+ struct dxgresource *resource) -+{ -+ struct dxgadapter *adapter = shared_resource->adapter; -+ -+ down_write(&adapter->shared_resource_list_lock); -+ DXG_TRACE("Removing resource: %p %p", shared_resource, resource); -+ if (resource->shared_resource_list_entry.next) { -+ list_del(&resource->shared_resource_list_entry); -+ resource->shared_resource_list_entry.next = NULL; -+ kref_put(&shared_resource->sresource_kref, -+ dxgsharedresource_destroy); -+ resource->shared_owner = NULL; -+ kref_put(&resource->resource_kref, dxgresource_release); -+ } -+ up_write(&adapter->shared_resource_list_lock); -+} -+ - struct dxgresource *dxgresource_create(struct dxgdevice *device) - { - struct dxgresource *resource; -@@ -532,6 +606,7 @@ void dxgresource_destroy(struct dxgresource *resource) - struct d3dkmt_destroyallocation2 args = { }; - int destroyed = test_and_set_bit(0, &resource->flags); - struct dxgdevice *device = resource->device; -+ struct dxgsharedresource *shared_resource; - - if (!destroyed) { - 
dxgresource_free_handle(resource); -@@ -547,6 +622,12 @@ void dxgresource_destroy(struct dxgresource *resource) - dxgallocation_destroy(alloc); - } - dxgdevice_remove_resource(device, resource); -+ shared_resource = resource->shared_owner; -+ if (shared_resource) { -+ dxgsharedresource_remove_resource(shared_resource, -+ resource); -+ resource->shared_owner = NULL; -+ } - } - kref_put(&resource->resource_kref, dxgresource_release); - } -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -38,6 +38,7 @@ struct dxgdevice; - struct dxgcontext; - struct dxgallocation; - struct dxgresource; -+struct dxgsharedresource; - struct dxgsyncobject; - - /* -@@ -372,6 +373,8 @@ struct dxgadapter { - struct list_head adapter_list_entry; - /* The list of dxgprocess_adapter entries */ - struct list_head adapter_process_list_head; -+ /* List of all dxgsharedresource objects */ -+ struct list_head shared_resource_list_head; - /* List of all non-device dxgsyncobject objects */ - struct list_head syncobj_list_head; - /* This lock protects shared resource and syncobject lists */ -@@ -405,6 +408,8 @@ void dxgadapter_remove_syncobj(struct dxgsyncobject *so); - void dxgadapter_add_process(struct dxgadapter *adapter, - struct dxgprocess_adapter *process_info); - void dxgadapter_remove_process(struct dxgprocess_adapter *process_info); -+void dxgadapter_remove_shared_resource(struct dxgadapter *adapter, -+ struct dxgsharedresource *object); - - /* - * The object represent the device object. -@@ -484,6 +489,64 @@ void dxgcontext_destroy_safe(struct dxgprocess *pr, struct dxgcontext *ctx); - void dxgcontext_release(struct kref *refcount); - bool dxgcontext_is_active(struct dxgcontext *ctx); - -+/* -+ * A shared resource object is created to track the list of dxgresource objects, -+ * which are opened for the same underlying shared resource. 
-+ * Objects are shared by using a file descriptor handle. -+ * FD is created by calling dxgk_share_objects and providing shandle to -+ * dxgsharedresource. The FD points to a dxgresource object, which is created -+ * by calling dxgk_open_resource_nt. dxgresource object is referenced by the -+ * FD. -+ * -+ * The object is referenced by every dxgresource in its list. -+ * -+ */ -+struct dxgsharedresource { -+ /* Every dxgresource object in the resource list takes a reference */ -+ struct kref sresource_kref; -+ struct dxgadapter *adapter; -+ /* List of dxgresource objects, opened for the shared resource. */ -+ /* Protected by dxgadapter::shared_resource_list_lock */ -+ struct list_head resource_list_head; -+ /* Entry in the list of dxgsharedresource in dxgadapter */ -+ /* Protected by dxgadapter::shared_resource_list_lock */ -+ struct list_head shared_resource_list_entry; -+ struct mutex fd_mutex; -+ /* Referenced by file descriptors */ -+ int host_shared_handle_nt_reference; -+ /* Corresponding global handle in the host */ -+ struct d3dkmthandle host_shared_handle; -+ /* -+ * When the sync object is shared by NT handle, this is the -+ * corresponding handle in the host -+ */ -+ struct d3dkmthandle host_shared_handle_nt; -+ /* Values below are computed when the resource is sealed */ -+ u32 runtime_private_data_size; -+ u32 alloc_private_data_size; -+ u32 resource_private_data_size; -+ u32 allocation_count; -+ union { -+ struct { -+ /* Cannot add new allocations */ -+ u32 sealed:1; -+ u32 reserved:31; -+ }; -+ long flags; -+ }; -+ u32 *alloc_private_data_sizes; -+ u8 *alloc_private_data; -+ u8 *runtime_private_data; -+ u8 *resource_private_data; -+}; -+ -+struct dxgsharedresource *dxgsharedresource_create(struct dxgadapter *adapter); -+void dxgsharedresource_destroy(struct kref *refcount); -+void dxgsharedresource_add_resource(struct dxgsharedresource *sres, -+ struct dxgresource *res); -+void dxgsharedresource_remove_resource(struct dxgsharedresource *sres, -+ 
struct dxgresource *res); -+ - struct dxgresource { - struct kref resource_kref; - enum dxgobjectstate object_state; -@@ -504,6 +567,8 @@ struct dxgresource { - }; - long flags; - }; -+ /* Owner of the shared resource */ -+ struct dxgsharedresource *shared_owner; - }; - - struct dxgresource *dxgresource_create(struct dxgdevice *dev); -@@ -658,6 +723,18 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -+int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process, -+ struct d3dkmthandle object, -+ struct d3dkmthandle *shared_handle); -+int dxgvmb_send_destroy_nt_shared_object(struct d3dkmthandle shared_handle); -+int dxgvmb_send_open_resource(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle device, -+ struct d3dkmthandle global_share, -+ u32 allocation_count, -+ u32 total_priv_drv_data_size, -+ struct d3dkmthandle *resource_handle, -+ struct d3dkmthandle *alloc_handles); - int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - enum d3dkmdt_standardallocationtype t, - struct d3dkmdt_gdisurfacedata *data, -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -258,6 +258,7 @@ int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, - init_rwsem(&adapter->core_lock); - - INIT_LIST_HEAD(&adapter->adapter_process_list_head); -+ INIT_LIST_HEAD(&adapter->shared_resource_list_head); - INIT_LIST_HEAD(&adapter->syncobj_list_head); - init_rwsem(&adapter->shared_resource_list_lock); - adapter->pci_dev = dev; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -712,6 +712,79 @@ int 
dxgvmb_send_destroy_process(struct d3dkmthandle process) - return ret; - } - -+int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process, -+ struct d3dkmthandle object, -+ struct d3dkmthandle *shared_handle) -+{ -+ struct dxgkvmb_command_createntsharedobject *command; -+ int ret; -+ struct dxgvmbusmsg msg; -+ -+ ret = init_message(&msg, NULL, process, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vm_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CREATENTSHAREDOBJECT, -+ process->host_handle); -+ command->object = object; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = dxgvmb_send_sync_msg(dxgglobal_get_dxgvmbuschannel(), -+ msg.hdr, msg.size, shared_handle, -+ sizeof(*shared_handle)); -+ -+ dxgglobal_release_channel_lock(); -+ -+ if (ret < 0) -+ goto cleanup; -+ if (shared_handle->v == 0) { -+ DXG_ERR("failed to create NT shared object"); -+ ret = -ENOTRECOVERABLE; -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_destroy_nt_shared_object(struct d3dkmthandle shared_handle) -+{ -+ struct dxgkvmb_command_destroyntsharedobject *command; -+ int ret; -+ struct dxgvmbusmsg msg; -+ -+ ret = init_message(&msg, NULL, NULL, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vm_to_host_init1(&command->hdr, -+ DXGK_VMBCOMMAND_DESTROYNTSHAREDOBJECT); -+ command->shared_handle = shared_handle; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(dxgglobal_get_dxgvmbuschannel(), -+ msg.hdr, msg.size); -+ -+ dxgglobal_release_channel_lock(); -+ -+cleanup: -+ free_message(&msg, NULL); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_destroy_sync_object(struct dxgprocess *process, - struct d3dkmthandle sync_object) - { -@@ -1552,6 +1625,60 @@ int 
dxgvmb_send_destroy_allocation(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_open_resource(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle device, -+ struct d3dkmthandle global_share, -+ u32 allocation_count, -+ u32 total_priv_drv_data_size, -+ struct d3dkmthandle *resource_handle, -+ struct d3dkmthandle *alloc_handles) -+{ -+ struct dxgkvmb_command_openresource *command; -+ struct dxgkvmb_command_openresource_return *result; -+ struct d3dkmthandle *handles; -+ int ret; -+ int i; -+ u32 result_size = allocation_count * sizeof(struct d3dkmthandle) + -+ sizeof(*result); -+ struct dxgvmbusmsgres msg = {.hdr = NULL}; -+ -+ ret = init_message_res(&msg, adapter, process, sizeof(*command), -+ result_size); -+ if (ret) -+ goto cleanup; -+ command = msg.msg; -+ result = msg.res; -+ -+ command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_OPENRESOURCE, -+ process->host_handle); -+ command->device = device; -+ command->nt_security_sharing = 1; -+ command->global_share = global_share; -+ command->allocation_count = allocation_count; -+ command->total_priv_drv_data_size = total_priv_drv_data_size; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ result, msg.res_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result->status); -+ if (ret < 0) -+ goto cleanup; -+ -+ *resource_handle = result->resource; -+ handles = (struct d3dkmthandle *) &result[1]; -+ for (i = 0; i < allocation_count; i++) -+ alloc_handles[i] = handles[i]; -+ -+cleanup: -+ free_message((struct dxgvmbusmsg *)&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - enum d3dkmdt_standardallocationtype alloctype, - struct d3dkmdt_gdisurfacedata *alloc_data, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ 
b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -172,6 +172,21 @@ struct dxgkvmb_command_signalguestevent { - bool dereference_event; - }; - -+/* -+ * The command returns struct d3dkmthandle of a shared object for the -+ * given pre-process object -+ */ -+struct dxgkvmb_command_createntsharedobject { -+ struct dxgkvmb_command_vm_to_host hdr; -+ struct d3dkmthandle object; -+}; -+ -+/* The command returns ntstatus */ -+struct dxgkvmb_command_destroyntsharedobject { -+ struct dxgkvmb_command_vm_to_host hdr; -+ struct d3dkmthandle shared_handle; -+}; -+ - /* Returns ntstatus */ - struct dxgkvmb_command_setiospaceregion { - struct dxgkvmb_command_vm_to_host hdr; -@@ -305,6 +320,21 @@ struct dxgkvmb_command_createallocation { - /* u8 priv_drv_data[] for each alloc_info */ - }; - -+struct dxgkvmb_command_openresource { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ bool nt_security_sharing; -+ struct d3dkmthandle global_share; -+ u32 allocation_count; -+ u32 total_priv_drv_data_size; -+}; -+ -+struct dxgkvmb_command_openresource_return { -+ struct d3dkmthandle resource; -+ struct ntstatus status; -+/* struct d3dkmthandle allocation[allocation_count]; */ -+}; -+ - struct dxgkvmb_command_getstandardallocprivdata { - struct dxgkvmb_command_vgpu_to_host hdr; - enum d3dkmdt_standardallocationtype alloc_type; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -36,8 +36,35 @@ static char *errorstr(int ret) - } - #endif - -+static int dxgsharedresource_release(struct inode *inode, struct file *file) -+{ -+ struct dxgsharedresource *resource = file->private_data; -+ -+ DXG_TRACE("Release resource: %p", resource); -+ mutex_lock(&resource->fd_mutex); -+ kref_get(&resource->sresource_kref); -+ resource->host_shared_handle_nt_reference--; -+ if (resource->host_shared_handle_nt_reference == 0) { -+ if (resource->host_shared_handle_nt.v) 
{ -+ dxgvmb_send_destroy_nt_shared_object( -+ resource->host_shared_handle_nt); -+ DXG_TRACE("Resource host_handle_nt destroyed: %x", -+ resource->host_shared_handle_nt.v); -+ resource->host_shared_handle_nt.v = 0; -+ } -+ kref_put(&resource->sresource_kref, dxgsharedresource_destroy); -+ } -+ mutex_unlock(&resource->fd_mutex); -+ kref_put(&resource->sresource_kref, dxgsharedresource_destroy); -+ return 0; -+} -+ -+static const struct file_operations dxg_resource_fops = { -+ .release = dxgsharedresource_release, -+}; -+ - static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, -- void *__user inargs) -+ void *__user inargs) - { - struct d3dkmt_openadapterfromluid args; - int ret; -@@ -212,6 +239,98 @@ dxgkp_enum_adapters(struct dxgprocess *process, - return ret; - } - -+static int dxgsharedresource_seal(struct dxgsharedresource *shared_resource) -+{ -+ int ret = 0; -+ int i = 0; -+ u8 *private_data; -+ u32 data_size; -+ struct dxgresource *resource; -+ struct dxgallocation *alloc; -+ -+ DXG_TRACE("Sealing resource: %p", shared_resource); -+ -+ down_write(&shared_resource->adapter->shared_resource_list_lock); -+ if (shared_resource->sealed) { -+ DXG_TRACE("Resource already sealed"); -+ goto cleanup; -+ } -+ shared_resource->sealed = 1; -+ if (!list_empty(&shared_resource->resource_list_head)) { -+ resource = -+ list_first_entry(&shared_resource->resource_list_head, -+ struct dxgresource, -+ shared_resource_list_entry); -+ DXG_TRACE("First resource: %p", resource); -+ mutex_lock(&resource->resource_mutex); -+ list_for_each_entry(alloc, &resource->alloc_list_head, -+ alloc_list_entry) { -+ DXG_TRACE("Resource alloc: %p %d", alloc, -+ alloc->priv_drv_data->data_size); -+ shared_resource->allocation_count++; -+ shared_resource->alloc_private_data_size += -+ alloc->priv_drv_data->data_size; -+ if (shared_resource->alloc_private_data_size < -+ alloc->priv_drv_data->data_size) { -+ DXG_ERR("alloc private data overflow"); -+ ret = -EINVAL; -+ goto cleanup1; -+ 
} -+ } -+ if (shared_resource->alloc_private_data_size == 0) { -+ ret = -EINVAL; -+ goto cleanup1; -+ } -+ shared_resource->alloc_private_data = -+ vzalloc(shared_resource->alloc_private_data_size); -+ if (shared_resource->alloc_private_data == NULL) { -+ ret = -EINVAL; -+ goto cleanup1; -+ } -+ shared_resource->alloc_private_data_sizes = -+ vzalloc(sizeof(u32)*shared_resource->allocation_count); -+ if (shared_resource->alloc_private_data_sizes == NULL) { -+ ret = -EINVAL; -+ goto cleanup1; -+ } -+ private_data = shared_resource->alloc_private_data; -+ data_size = shared_resource->alloc_private_data_size; -+ i = 0; -+ list_for_each_entry(alloc, &resource->alloc_list_head, -+ alloc_list_entry) { -+ u32 alloc_data_size = alloc->priv_drv_data->data_size; -+ -+ if (alloc_data_size) { -+ if (data_size < alloc_data_size) { -+ dev_err(DXGDEV, -+ "Invalid private data size"); -+ ret = -EINVAL; -+ goto cleanup1; -+ } -+ shared_resource->alloc_private_data_sizes[i] = -+ alloc_data_size; -+ memcpy(private_data, -+ alloc->priv_drv_data->data, -+ alloc_data_size); -+ vfree(alloc->priv_drv_data); -+ alloc->priv_drv_data = NULL; -+ private_data += alloc_data_size; -+ data_size -= alloc_data_size; -+ } -+ i++; -+ } -+ if (data_size != 0) { -+ DXG_ERR("Data size mismatch"); -+ ret = -EINVAL; -+ } -+cleanup1: -+ mutex_unlock(&resource->resource_mutex); -+ } -+cleanup: -+ up_write(&shared_resource->adapter->shared_resource_list_lock); -+ return ret; -+} -+ - static int - dxgkio_enum_adapters(struct dxgprocess *process, void *__user inargs) - { -@@ -803,6 +922,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - u32 alloc_info_size = 0; - struct dxgresource *resource = NULL; - struct dxgallocation **dxgalloc = NULL; -+ struct dxgsharedresource *shared_resource = NULL; - bool resource_mutex_acquired = false; - u32 standard_alloc_priv_data_size = 0; - void *standard_alloc_priv_data = NULL; -@@ -973,6 +1093,76 @@ dxgkio_create_allocation(struct dxgprocess 
*process, void *__user inargs) - } - resource->private_runtime_handle = - args.private_runtime_resource_handle; -+ if (args.flags.create_shared) { -+ if (!args.flags.nt_security_sharing) { -+ dev_err(DXGDEV, -+ "nt_security_sharing must be set"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ shared_resource = dxgsharedresource_create(adapter); -+ if (shared_resource == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ shared_resource->runtime_private_data_size = -+ args.priv_drv_data_size; -+ shared_resource->resource_private_data_size = -+ args.priv_drv_data_size; -+ -+ shared_resource->runtime_private_data_size = -+ args.private_runtime_data_size; -+ shared_resource->resource_private_data_size = -+ args.priv_drv_data_size; -+ dxgsharedresource_add_resource(shared_resource, -+ resource); -+ if (args.flags.standard_allocation) { -+ shared_resource->resource_private_data = -+ res_priv_data; -+ shared_resource->resource_private_data_size = -+ res_priv_data_size; -+ res_priv_data = NULL; -+ } -+ if (args.private_runtime_data_size) { -+ shared_resource->runtime_private_data = -+ vzalloc(args.private_runtime_data_size); -+ if (shared_resource->runtime_private_data == -+ NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user( -+ shared_resource->runtime_private_data, -+ args.private_runtime_data, -+ args.private_runtime_data_size); -+ if (ret) { -+ dev_err(DXGDEV, -+ "failed to copy runtime data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ if (args.priv_drv_data_size && -+ !args.flags.standard_allocation) { -+ shared_resource->resource_private_data = -+ vzalloc(args.priv_drv_data_size); -+ if (shared_resource->resource_private_data == -+ NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user( -+ shared_resource->resource_private_data, -+ args.priv_drv_data, -+ args.priv_drv_data_size); -+ if (ret) { -+ dev_err(DXGDEV, -+ "failed to copy res data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ } - } else { - if (args.resource.v) { 
- /* Adding new allocations to the given resource */ -@@ -991,6 +1181,12 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - ret = -EINVAL; - goto cleanup; - } -+ if (resource->shared_owner && -+ resource->shared_owner->sealed) { -+ DXG_ERR("Resource is sealed"); -+ ret = -EINVAL; -+ goto cleanup; -+ } - /* Synchronize with resource destruction */ - mutex_lock(&resource->resource_mutex); - if (!dxgresource_is_active(resource)) { -@@ -1092,9 +1288,16 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - } - } - if (resource && args.flags.create_resource) { -+ if (shared_resource) { -+ dxgsharedresource_remove_resource -+ (shared_resource, resource); -+ } - dxgresource_destroy(resource); - } - } -+ if (shared_resource) -+ kref_put(&shared_resource->sresource_kref, -+ dxgsharedresource_destroy); - if (dxgalloc) - vfree(dxgalloc); - if (standard_alloc_priv_data) -@@ -1140,6 +1343,10 @@ static int validate_alloc(struct dxgallocation *alloc0, - fail_reason = 4; - goto cleanup; - } -+ if (alloc->owner.resource->shared_owner) { -+ fail_reason = 5; -+ goto cleanup; -+ } - } else { - if (alloc->owner.device != device) { - fail_reason = 6; -@@ -2146,6 +2353,582 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgsharedresource_get_host_nt_handle(struct dxgsharedresource *resource, -+ struct dxgprocess *process, -+ struct d3dkmthandle objecthandle) -+{ -+ int ret = 0; -+ -+ mutex_lock(&resource->fd_mutex); -+ if (resource->host_shared_handle_nt_reference == 0) { -+ ret = dxgvmb_send_create_nt_shared_object(process, -+ objecthandle, -+ &resource->host_shared_handle_nt); -+ if (ret < 0) -+ goto cleanup; -+ DXG_TRACE("Resource host_shared_handle_ht: %x", -+ resource->host_shared_handle_nt.v); -+ kref_get(&resource->sresource_kref); -+ } -+ resource->host_shared_handle_nt_reference++; -+cleanup: -+ mutex_unlock(&resource->fd_mutex); -+ return ret; -+} -+ -+enum 
dxg_sharedobject_type { -+ DXG_SHARED_RESOURCE -+}; -+ -+static int get_object_fd(enum dxg_sharedobject_type type, -+ void *object, int *fdout) -+{ -+ struct file *file; -+ int fd; -+ -+ fd = get_unused_fd_flags(O_CLOEXEC); -+ if (fd < 0) { -+ DXG_ERR("get_unused_fd_flags failed: %x", fd); -+ return -ENOTRECOVERABLE; -+ } -+ -+ switch (type) { -+ case DXG_SHARED_RESOURCE: -+ file = anon_inode_getfile("dxgresource", -+ &dxg_resource_fops, object, 0); -+ break; -+ default: -+ return -EINVAL; -+ }; -+ if (IS_ERR(file)) { -+ DXG_ERR("anon_inode_getfile failed: %x", fd); -+ put_unused_fd(fd); -+ return -ENOTRECOVERABLE; -+ } -+ -+ fd_install(fd, file); -+ *fdout = fd; -+ return 0; -+} -+ -+static int -+dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_shareobjects args; -+ enum hmgrentry_type object_type; -+ struct dxgsyncobject *syncobj = NULL; -+ struct dxgresource *resource = NULL; -+ struct dxgsharedresource *shared_resource = NULL; -+ struct d3dkmthandle *handles = NULL; -+ int object_fd = -1; -+ void *obj = NULL; -+ u32 handle_size; -+ int ret; -+ u64 tmp = 0; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.object_count == 0 || args.object_count > 1) { -+ DXG_ERR("invalid object count %d", args.object_count); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ handle_size = args.object_count * sizeof(struct d3dkmthandle); -+ -+ handles = vzalloc(handle_size); -+ if (handles == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(handles, args.objects, handle_size); -+ if (ret) { -+ DXG_ERR("failed to copy object handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ DXG_TRACE("Sharing handle: %x", handles[0].v); -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -+ object_type = hmgrtable_get_object_type(&process->handle_table, -+ handles[0]); -+ obj = 
hmgrtable_get_object(&process->handle_table, handles[0]); -+ if (obj == NULL) { -+ DXG_ERR("invalid object handle %x", handles[0].v); -+ ret = -EINVAL; -+ } else { -+ switch (object_type) { -+ case HMGRENTRY_TYPE_DXGRESOURCE: -+ resource = obj; -+ if (resource->shared_owner) { -+ kref_get(&resource->resource_kref); -+ shared_resource = resource->shared_owner; -+ } else { -+ resource = NULL; -+ DXG_ERR("resource object shared"); -+ ret = -EINVAL; -+ } -+ break; -+ default: -+ DXG_ERR("invalid object type %d", object_type); -+ ret = -EINVAL; -+ break; -+ } -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED); -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ switch (object_type) { -+ case HMGRENTRY_TYPE_DXGRESOURCE: -+ ret = get_object_fd(DXG_SHARED_RESOURCE, shared_resource, -+ &object_fd); -+ if (ret < 0) { -+ DXG_ERR("get_object_fd failed for resource"); -+ goto cleanup; -+ } -+ ret = dxgsharedresource_get_host_nt_handle(shared_resource, -+ process, handles[0]); -+ if (ret < 0) { -+ DXG_ERR("get_host_res_nt_handle failed"); -+ goto cleanup; -+ } -+ ret = dxgsharedresource_seal(shared_resource); -+ if (ret < 0) { -+ DXG_ERR("dxgsharedresource_seal failed"); -+ goto cleanup; -+ } -+ break; -+ default: -+ ret = -EINVAL; -+ break; -+ } -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ DXG_TRACE("Object FD: %x", object_fd); -+ -+ tmp = (u64) object_fd; -+ -+ ret = copy_to_user(args.shared_handle, &tmp, sizeof(u64)); -+ if (ret < 0) -+ DXG_ERR("failed to copy shared handle"); -+ -+cleanup: -+ if (ret < 0) { -+ if (object_fd >= 0) -+ put_unused_fd(object_fd); -+ } -+ -+ if (handles) -+ vfree(handles); -+ -+ if (syncobj) -+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release); -+ -+ if (resource) -+ kref_put(&resource->resource_kref, dxgresource_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_query_resource_info_nt(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_queryresourceinfofromnthandle 
args; -+ int ret; -+ struct dxgdevice *device = NULL; -+ struct dxgsharedresource *shared_resource = NULL; -+ struct file *file = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ file = fget(args.nt_handle); -+ if (!file) { -+ DXG_ERR("failed to get file from handle: %llx", -+ args.nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (file->f_op != &dxg_resource_fops) { -+ DXG_ERR("invalid fd: %llx", args.nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ shared_resource = file->private_data; -+ if (shared_resource == NULL) { -+ DXG_ERR("invalid private data: %llx", args.nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgsharedresource_seal(shared_resource); -+ if (ret < 0) -+ goto cleanup; -+ -+ args.private_runtime_data_size = -+ shared_resource->runtime_private_data_size; -+ args.resource_priv_drv_data_size = -+ shared_resource->resource_private_data_size; -+ args.allocation_count = shared_resource->allocation_count; -+ args.total_priv_drv_data_size = -+ shared_resource->alloc_private_data_size; -+ -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (file) -+ fput(file); -+ if (device) -+ dxgdevice_release_lock_shared(device); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+assign_resource_handles(struct dxgprocess *process, -+ struct dxgsharedresource *shared_resource, -+ struct d3dkmt_openresourcefromnthandle *args, -+ 
struct d3dkmthandle resource_handle, -+ struct dxgresource *resource, -+ struct dxgallocation **allocs, -+ struct d3dkmthandle *handles) -+{ -+ int ret; -+ int i; -+ u8 *cur_priv_data; -+ u32 total_priv_data_size = 0; -+ struct d3dddi_openallocationinfo2 open_alloc_info = { }; -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, resource, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ resource_handle); -+ if (ret < 0) -+ goto cleanup; -+ resource->handle = resource_handle; -+ resource->handle_valid = 1; -+ cur_priv_data = args->total_priv_drv_data; -+ for (i = 0; i < args->allocation_count; i++) { -+ ret = hmgrtable_assign_handle(&process->handle_table, allocs[i], -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ handles[i]); -+ if (ret < 0) -+ goto cleanup; -+ allocs[i]->alloc_handle = handles[i]; -+ allocs[i]->handle_valid = 1; -+ open_alloc_info.allocation = handles[i]; -+ if (shared_resource->alloc_private_data_sizes) -+ open_alloc_info.priv_drv_data_size = -+ shared_resource->alloc_private_data_sizes[i]; -+ else -+ open_alloc_info.priv_drv_data_size = 0; -+ -+ total_priv_data_size += open_alloc_info.priv_drv_data_size; -+ open_alloc_info.priv_drv_data = cur_priv_data; -+ cur_priv_data += open_alloc_info.priv_drv_data_size; -+ -+ ret = copy_to_user(&args->open_alloc_info[i], -+ &open_alloc_info, -+ sizeof(open_alloc_info)); -+ if (ret) { -+ DXG_ERR("failed to copy alloc info"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ args->total_priv_drv_data_size = total_priv_data_size; -+cleanup: -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ if (ret < 0) { -+ for (i = 0; i < args->allocation_count; i++) -+ dxgallocation_free_handle(allocs[i]); -+ dxgresource_free_handle(resource); -+ } -+ return ret; -+} -+ -+static int -+open_resource(struct dxgprocess *process, -+ struct d3dkmt_openresourcefromnthandle *args, -+ __user struct d3dkmthandle *res_out, -+ __user u32 *total_driver_data_size_out) -+{ -+ int ret = 0; -+ 
int i; -+ struct d3dkmthandle *alloc_handles = NULL; -+ int alloc_handles_size = sizeof(struct d3dkmthandle) * -+ args->allocation_count; -+ struct dxgsharedresource *shared_resource = NULL; -+ struct dxgresource *resource = NULL; -+ struct dxgallocation **allocs = NULL; -+ struct d3dkmthandle global_share = {}; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dkmthandle resource_handle = {}; -+ struct file *file = NULL; -+ -+ DXG_TRACE("Opening resource handle: %llx", args->nt_handle); -+ -+ file = fget(args->nt_handle); -+ if (!file) { -+ DXG_ERR("failed to get file from handle: %llx", -+ args->nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (file->f_op != &dxg_resource_fops) { -+ DXG_ERR("invalid fd type: %llx", args->nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ shared_resource = file->private_data; -+ if (shared_resource == NULL) { -+ DXG_ERR("invalid private data: %llx", args->nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (kref_get_unless_zero(&shared_resource->sresource_kref) == 0) -+ shared_resource = NULL; -+ else -+ global_share = shared_resource->host_shared_handle_nt; -+ -+ if (shared_resource == NULL) { -+ DXG_ERR("Invalid shared resource handle: %x", -+ (u32)args->nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ DXG_TRACE("Shared resource: %p %x", shared_resource, -+ global_share.v); -+ -+ device = dxgprocess_device_by_handle(process, args->device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgsharedresource_seal(shared_resource); -+ if (ret < 0) -+ goto cleanup; -+ -+ if (args->allocation_count != shared_resource->allocation_count || -+ 
args->private_runtime_data_size < -+ shared_resource->runtime_private_data_size || -+ args->resource_priv_drv_data_size < -+ shared_resource->resource_private_data_size || -+ args->total_priv_drv_data_size < -+ shared_resource->alloc_private_data_size) { -+ ret = -EINVAL; -+ DXG_ERR("Invalid data sizes"); -+ goto cleanup; -+ } -+ -+ alloc_handles = vzalloc(alloc_handles_size); -+ if (alloc_handles == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ allocs = vzalloc(sizeof(void *) * args->allocation_count); -+ if (allocs == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ resource = dxgresource_create(device); -+ if (resource == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ dxgsharedresource_add_resource(shared_resource, resource); -+ -+ for (i = 0; i < args->allocation_count; i++) { -+ allocs[i] = dxgallocation_create(process); -+ if (allocs[i] == NULL) -+ goto cleanup; -+ ret = dxgresource_add_alloc(resource, allocs[i]); -+ if (ret < 0) -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_open_resource(process, adapter, -+ device->handle, global_share, -+ args->allocation_count, -+ args->total_priv_drv_data_size, -+ &resource_handle, alloc_handles); -+ if (ret < 0) { -+ DXG_ERR("dxgvmb_send_open_resource failed"); -+ goto cleanup; -+ } -+ -+ if (shared_resource->runtime_private_data_size) { -+ ret = copy_to_user(args->private_runtime_data, -+ shared_resource->runtime_private_data, -+ shared_resource->runtime_private_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy runtime data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ if (shared_resource->resource_private_data_size) { -+ ret = copy_to_user(args->resource_priv_drv_data, -+ shared_resource->resource_private_data, -+ shared_resource->resource_private_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy resource data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ if (shared_resource->alloc_private_data_size) { -+ ret = copy_to_user(args->total_priv_drv_data, -+ 
shared_resource->alloc_private_data, -+ shared_resource->alloc_private_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ ret = assign_resource_handles(process, shared_resource, args, -+ resource_handle, resource, allocs, -+ alloc_handles); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(res_out, &resource_handle, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy resource handle to user"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = copy_to_user(total_driver_data_size_out, -+ &args->total_priv_drv_data_size, sizeof(u32)); -+ if (ret) { -+ DXG_ERR("failed to copy total driver data size"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (resource_handle.v) { -+ struct d3dkmt_destroyallocation2 tmp = { }; -+ -+ tmp.flags.assume_not_in_use = 1; -+ tmp.device = args->device; -+ tmp.resource = resource_handle; -+ ret = dxgvmb_send_destroy_allocation(process, device, -+ &tmp, NULL); -+ } -+ if (resource) -+ dxgresource_destroy(resource); -+ } -+ -+ if (file) -+ fput(file); -+ if (allocs) -+ vfree(allocs); -+ if (shared_resource) -+ kref_put(&shared_resource->sresource_kref, -+ dxgsharedresource_destroy); -+ if (alloc_handles) -+ vfree(alloc_handles); -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ dxgdevice_release_lock_shared(device); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ return ret; -+} -+ -+static int -+dxgkio_open_resource_nt(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_openresourcefromnthandle args; -+ struct d3dkmt_openresourcefromnthandle *__user args_user = inargs; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = open_resource(process, &args, -+ &args_user->resource, -+ &args_user->total_priv_drv_data_size); -+ -+cleanup: 
-+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, -@@ -2215,10 +2998,11 @@ static struct ioctl_desc ioctls[] = { - /* 0x3c */ {}, - /* 0x3d */ {}, - /* 0x3e */ {dxgkio_enum_adapters3, LX_DXENUMADAPTERS3}, --/* 0x3f */ {}, -+/* 0x3f */ {dxgkio_share_objects, LX_DXSHAREOBJECTS}, - /* 0x40 */ {}, --/* 0x41 */ {}, --/* 0x42 */ {}, -+/* 0x41 */ {dxgkio_query_resource_info_nt, -+ LX_DXQUERYRESOURCEINFOFROMNTHANDLE}, -+/* 0x42 */ {dxgkio_open_resource_nt, LX_DXOPENRESOURCEFROMNTHANDLE}, - /* 0x43 */ {}, - /* 0x44 */ {}, - /* 0x45 */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -682,6 +682,94 @@ enum d3dkmt_deviceexecution_state { - _D3DKMT_DEVICEEXECUTION_ERROR_DMAPAGEFAULT = 7, - }; - -+struct d3dddi_openallocationinfo2 { -+ struct d3dkmthandle allocation; -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ __u32 priv_drv_data_size; -+ __u64 gpu_va; -+ __u64 reserved[6]; -+}; -+ -+struct d3dkmt_openresourcefromnthandle { -+ struct d3dkmthandle device; -+ __u32 reserved; -+ __u64 nt_handle; -+ __u32 allocation_count; -+ __u32 reserved1; -+#ifdef __KERNEL__ -+ struct d3dddi_openallocationinfo2 *open_alloc_info; -+#else -+ __u64 open_alloc_info; -+#endif -+ int private_runtime_data_size; -+ __u32 reserved2; -+#ifdef __KERNEL__ -+ void *private_runtime_data; -+#else -+ __u64 private_runtime_data; -+#endif -+ __u32 resource_priv_drv_data_size; -+ __u32 reserved3; -+#ifdef __KERNEL__ -+ void *resource_priv_drv_data; -+#else -+ __u64 resource_priv_drv_data; -+#endif -+ __u32 total_priv_drv_data_size; -+#ifdef __KERNEL__ -+ void *total_priv_drv_data; -+#else -+ __u64 total_priv_drv_data; -+#endif -+ struct d3dkmthandle resource; -+ struct 
d3dkmthandle keyed_mutex; -+#ifdef __KERNEL__ -+ void *keyed_mutex_private_data; -+#else -+ __u64 keyed_mutex_private_data; -+#endif -+ __u32 keyed_mutex_private_data_size; -+ struct d3dkmthandle sync_object; -+}; -+ -+struct d3dkmt_queryresourceinfofromnthandle { -+ struct d3dkmthandle device; -+ __u32 reserved; -+ __u64 nt_handle; -+#ifdef __KERNEL__ -+ void *private_runtime_data; -+#else -+ __u64 private_runtime_data; -+#endif -+ __u32 private_runtime_data_size; -+ __u32 total_priv_drv_data_size; -+ __u32 resource_priv_drv_data_size; -+ __u32 allocation_count; -+}; -+ -+struct d3dkmt_shareobjects { -+ __u32 object_count; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ const struct d3dkmthandle *objects; -+ void *object_attr; /* security attributes */ -+#else -+ __u64 objects; -+ __u64 object_attr; -+#endif -+ __u32 desired_access; -+ __u32 reserved1; -+#ifdef __KERNEL__ -+ __u64 *shared_handle; /* output file descriptors */ -+#else -+ __u64 shared_handle; -+#endif -+}; -+ - union d3dkmt_enumadapters_filter { - struct { - __u64 include_compute_only:1; -@@ -747,5 +835,13 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x3b, struct d3dkmt_waitforsynchronizationobjectfromgpu) - #define LX_DXENUMADAPTERS3 \ - _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) -+#define LX_DXSHAREOBJECTS \ -+ _IOWR(0x47, 0x3f, struct d3dkmt_shareobjects) -+#define LX_DXOPENSYNCOBJECTFROMNTHANDLE2 \ -+ _IOWR(0x47, 0x40, struct d3dkmt_opensyncobjectfromnthandle2) -+#define LX_DXQUERYRESOURCEINFOFROMNTHANDLE \ -+ _IOWR(0x47, 0x41, struct d3dkmt_queryresourceinfofromnthandle) -+#define LX_DXOPENRESOURCEFROMNTHANDLE \ -+ _IOWR(0x47, 0x42, struct d3dkmt_openresourcefromnthandle) - - #endif /* _D3DKMTHK_H */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1679-drivers-hv-dxgkrnl-Sharing-of-sync-objects.patch b/patch/kernel/archive/wsl2-arm64-6.6/1679-drivers-hv-dxgkrnl-Sharing-of-sync-objects.patch deleted file mode 100644 index 8e9148855001..000000000000 --- 
a/patch/kernel/archive/wsl2-arm64-6.6/1679-drivers-hv-dxgkrnl-Sharing-of-sync-objects.patch +++ /dev/null @@ -1,1555 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Mon, 31 Jan 2022 16:41:28 -0800 -Subject: drivers: hv: dxgkrnl: Sharing of sync objects - -Implement creation of a shared sync objects and the ioctl for sharing -dxgsyncobject objects between processes in the virtual machine. - -Sync objects are shared using file descriptor (FD) handles. -The name "NT handle" is used to be compatible with Windows implementation. - -An FD handle is created by the LX_DXSHAREOBJECTS ioctl. The created FD -handle could be sent to another process using any Linux API. - -To use a shared sync object in other ioctls, the object needs to be -opened using its FD handle. A sync object is opened by the -LX_DXOPENSYNCOBJECTFROMNTHANDLE2 ioctl, which returns a d3dkmthandle -value. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 181 ++- - drivers/hv/dxgkrnl/dxgkrnl.h | 96 ++ - drivers/hv/dxgkrnl/dxgmodule.c | 1 + - drivers/hv/dxgkrnl/dxgprocess.c | 4 + - drivers/hv/dxgkrnl/dxgvmbus.c | 221 ++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 35 + - drivers/hv/dxgkrnl/ioctl.c | 556 +++++++++- - include/uapi/misc/d3dkmthk.h | 93 ++ - 8 files changed, 1181 insertions(+), 6 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -171,6 +171,26 @@ void dxgadapter_remove_shared_resource(struct dxgadapter *adapter, - up_write(&adapter->shared_resource_list_lock); - } - -+void dxgadapter_add_shared_syncobj(struct dxgadapter *adapter, -+ struct dxgsharedsyncobject *object) -+{ -+ down_write(&adapter->shared_resource_list_lock); -+ list_add_tail(&object->adapter_shared_syncobj_list_entry, -+ 
&adapter->adapter_shared_syncobj_list_head); -+ up_write(&adapter->shared_resource_list_lock); -+} -+ -+void dxgadapter_remove_shared_syncobj(struct dxgadapter *adapter, -+ struct dxgsharedsyncobject *object) -+{ -+ down_write(&adapter->shared_resource_list_lock); -+ if (object->adapter_shared_syncobj_list_entry.next) { -+ list_del(&object->adapter_shared_syncobj_list_entry); -+ object->adapter_shared_syncobj_list_entry.next = NULL; -+ } -+ up_write(&adapter->shared_resource_list_lock); -+} -+ - void dxgadapter_add_syncobj(struct dxgadapter *adapter, - struct dxgsyncobject *object) - { -@@ -622,7 +642,7 @@ void dxgresource_destroy(struct dxgresource *resource) - dxgallocation_destroy(alloc); - } - dxgdevice_remove_resource(device, resource); -- shared_resource = resource->shared_owner; -+ shared_resource = resource->shared_owner; - if (shared_resource) { - dxgsharedresource_remove_resource(shared_resource, - resource); -@@ -736,6 +756,9 @@ struct dxgcontext *dxgcontext_create(struct dxgdevice *device) - */ - void dxgcontext_destroy(struct dxgprocess *process, struct dxgcontext *context) - { -+ struct dxghwqueue *hwqueue; -+ struct dxghwqueue *tmp; -+ - DXG_TRACE("Destroying context %p", context); - context->object_state = DXGOBJECTSTATE_DESTROYED; - if (context->device) { -@@ -747,6 +770,10 @@ void dxgcontext_destroy(struct dxgprocess *process, struct dxgcontext *context) - dxgdevice_remove_context(context->device, context); - kref_put(&context->device->device_kref, dxgdevice_release); - } -+ list_for_each_entry_safe(hwqueue, tmp, &context->hwqueue_list_head, -+ hwqueue_list_entry) { -+ dxghwqueue_destroy(process, hwqueue); -+ } - kref_put(&context->context_kref, dxgcontext_release); - } - -@@ -773,6 +800,38 @@ void dxgcontext_release(struct kref *refcount) - kfree(context); - } - -+int dxgcontext_add_hwqueue(struct dxgcontext *context, -+ struct dxghwqueue *hwqueue) -+{ -+ int ret = 0; -+ -+ down_write(&context->hwqueue_list_lock); -+ if 
(dxgcontext_is_active(context)) -+ list_add_tail(&hwqueue->hwqueue_list_entry, -+ &context->hwqueue_list_head); -+ else -+ ret = -ENODEV; -+ up_write(&context->hwqueue_list_lock); -+ return ret; -+} -+ -+void dxgcontext_remove_hwqueue(struct dxgcontext *context, -+ struct dxghwqueue *hwqueue) -+{ -+ if (hwqueue->hwqueue_list_entry.next) { -+ list_del(&hwqueue->hwqueue_list_entry); -+ hwqueue->hwqueue_list_entry.next = NULL; -+ } -+} -+ -+void dxgcontext_remove_hwqueue_safe(struct dxgcontext *context, -+ struct dxghwqueue *hwqueue) -+{ -+ down_write(&context->hwqueue_list_lock); -+ dxgcontext_remove_hwqueue(context, hwqueue); -+ up_write(&context->hwqueue_list_lock); -+} -+ - struct dxgallocation *dxgallocation_create(struct dxgprocess *process) - { - struct dxgallocation *alloc; -@@ -958,6 +1017,63 @@ void dxgprocess_adapter_remove_device(struct dxgdevice *device) - mutex_unlock(&device->adapter_info->device_list_mutex); - } - -+struct dxgsharedsyncobject *dxgsharedsyncobj_create(struct dxgadapter *adapter, -+ struct dxgsyncobject *so) -+{ -+ struct dxgsharedsyncobject *syncobj; -+ -+ syncobj = kzalloc(sizeof(*syncobj), GFP_KERNEL); -+ if (syncobj) { -+ kref_init(&syncobj->ssyncobj_kref); -+ INIT_LIST_HEAD(&syncobj->shared_syncobj_list_head); -+ syncobj->adapter = adapter; -+ syncobj->type = so->type; -+ syncobj->monitored_fence = so->monitored_fence; -+ dxgadapter_add_shared_syncobj(adapter, syncobj); -+ kref_get(&adapter->adapter_kref); -+ init_rwsem(&syncobj->syncobj_list_lock); -+ mutex_init(&syncobj->fd_mutex); -+ } -+ return syncobj; -+} -+ -+void dxgsharedsyncobj_release(struct kref *refcount) -+{ -+ struct dxgsharedsyncobject *syncobj; -+ -+ syncobj = container_of(refcount, struct dxgsharedsyncobject, -+ ssyncobj_kref); -+ DXG_TRACE("Destroying shared sync object %p", syncobj); -+ if (syncobj->adapter) { -+ dxgadapter_remove_shared_syncobj(syncobj->adapter, -+ syncobj); -+ kref_put(&syncobj->adapter->adapter_kref, -+ dxgadapter_release); -+ } -+ 
kfree(syncobj); -+} -+ -+void dxgsharedsyncobj_add_syncobj(struct dxgsharedsyncobject *shared, -+ struct dxgsyncobject *syncobj) -+{ -+ DXG_TRACE("Add syncobj 0x%p 0x%p", shared, syncobj); -+ kref_get(&shared->ssyncobj_kref); -+ down_write(&shared->syncobj_list_lock); -+ list_add(&syncobj->shared_syncobj_list_entry, -+ &shared->shared_syncobj_list_head); -+ syncobj->shared_owner = shared; -+ up_write(&shared->syncobj_list_lock); -+} -+ -+void dxgsharedsyncobj_remove_syncobj(struct dxgsharedsyncobject *shared, -+ struct dxgsyncobject *syncobj) -+{ -+ DXG_TRACE("Remove syncobj 0x%p", shared); -+ down_write(&shared->syncobj_list_lock); -+ list_del(&syncobj->shared_syncobj_list_entry); -+ up_write(&shared->syncobj_list_lock); -+} -+ - struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, - struct dxgdevice *device, - struct dxgadapter *adapter, -@@ -1091,7 +1207,70 @@ void dxgsyncobject_release(struct kref *refcount) - struct dxgsyncobject *syncobj; - - syncobj = container_of(refcount, struct dxgsyncobject, syncobj_kref); -+ if (syncobj->shared_owner) { -+ dxgsharedsyncobj_remove_syncobj(syncobj->shared_owner, -+ syncobj); -+ kref_put(&syncobj->shared_owner->ssyncobj_kref, -+ dxgsharedsyncobj_release); -+ } - if (syncobj->host_event) - kfree(syncobj->host_event); - kfree(syncobj); - } -+ -+struct dxghwqueue *dxghwqueue_create(struct dxgcontext *context) -+{ -+ struct dxgprocess *process = context->device->process; -+ struct dxghwqueue *hwqueue = kzalloc(sizeof(*hwqueue), GFP_KERNEL); -+ -+ if (hwqueue) { -+ kref_init(&hwqueue->hwqueue_kref); -+ hwqueue->context = context; -+ hwqueue->process = process; -+ hwqueue->device_handle = context->device->handle; -+ if (dxgcontext_add_hwqueue(context, hwqueue) < 0) { -+ kref_put(&hwqueue->hwqueue_kref, dxghwqueue_release); -+ hwqueue = NULL; -+ } else { -+ kref_get(&context->context_kref); -+ } -+ } -+ return hwqueue; -+} -+ -+void dxghwqueue_destroy(struct dxgprocess *process, struct dxghwqueue *hwqueue) -+{ 
-+ DXG_TRACE("Destroyng hwqueue %p", hwqueue); -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ if (hwqueue->handle.v) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ hwqueue->handle); -+ hwqueue->handle.v = 0; -+ } -+ if (hwqueue->progress_fence_sync_object.v) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_MONITOREDFENCE, -+ hwqueue->progress_fence_sync_object); -+ hwqueue->progress_fence_sync_object.v = 0; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (hwqueue->progress_fence_mapped_address) { -+ dxg_unmap_iospace(hwqueue->progress_fence_mapped_address, -+ PAGE_SIZE); -+ hwqueue->progress_fence_mapped_address = NULL; -+ } -+ dxgcontext_remove_hwqueue_safe(hwqueue->context, hwqueue); -+ -+ kref_put(&hwqueue->context->context_kref, dxgcontext_release); -+ kref_put(&hwqueue->hwqueue_kref, dxghwqueue_release); -+} -+ -+void dxghwqueue_release(struct kref *refcount) -+{ -+ struct dxghwqueue *hwqueue; -+ -+ hwqueue = container_of(refcount, struct dxghwqueue, hwqueue_kref); -+ kfree(hwqueue); -+} -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -40,6 +40,8 @@ struct dxgallocation; - struct dxgresource; - struct dxgsharedresource; - struct dxgsyncobject; -+struct dxgsharedsyncobject; -+struct dxghwqueue; - - /* - * Driver private data. -@@ -137,6 +139,18 @@ struct dxghosteventcpu { - * "device" syncobject, because the belong to a device (dxgdevice). - * Device syncobjects are inserted to a list in dxgdevice. - * -+ * A syncobject can be "shared", meaning that it could be opened by many -+ * processes. -+ * -+ * Shared syncobjects are inserted to a list in its owner -+ * (dxgsharedsyncobject). -+ * A syncobject can be shared by using a global handle or by using -+ * "NT security handle". 
-+ * When global handle sharing is used, the handle is created durinig object -+ * creation. -+ * When "NT security" is used, the handle for sharing is create be calling -+ * dxgk_share_objects. On Linux "NT handle" is represented by a file -+ * descriptor. FD points to dxgsharedsyncobject. - */ - struct dxgsyncobject { - struct kref syncobj_kref; -@@ -146,6 +160,8 @@ struct dxgsyncobject { - * List entry in dxgadapter for other objects - */ - struct list_head syncobj_list_entry; -+ /* List entry in the dxgsharedsyncobject object for shared synobjects */ -+ struct list_head shared_syncobj_list_entry; - /* Adapter, the syncobject belongs to. NULL for stopped sync obejcts. */ - struct dxgadapter *adapter; - /* -@@ -156,6 +172,8 @@ struct dxgsyncobject { - struct dxgprocess *process; - /* Used by D3DDDI_CPU_NOTIFICATION objects */ - struct dxghosteventcpu *host_event; -+ /* Owner object for shared syncobjects */ -+ struct dxgsharedsyncobject *shared_owner; - /* CPU virtual address of the fence value for "device" syncobjects */ - void *mapped_address; - /* Handle in the process handle table */ -@@ -187,6 +205,41 @@ struct dxgvgpuchannel { - struct hv_device *hdev; - }; - -+/* -+ * The object is used as parent of all sync objects, created for a shared -+ * syncobject. When a shared syncobject is created without NT security, the -+ * handle in the global handle table will point to this object. 
-+ */ -+struct dxgsharedsyncobject { -+ struct kref ssyncobj_kref; -+ /* Referenced by file descriptors */ -+ int host_shared_handle_nt_reference; -+ /* Corresponding handle in the host global handle table */ -+ struct d3dkmthandle host_shared_handle; -+ /* -+ * When the sync object is shared by NT handle, this is the -+ * corresponding handle in the host -+ */ -+ struct d3dkmthandle host_shared_handle_nt; -+ /* Protects access to host_shared_handle_nt */ -+ struct mutex fd_mutex; -+ struct rw_semaphore syncobj_list_lock; -+ struct list_head shared_syncobj_list_head; -+ struct list_head adapter_shared_syncobj_list_entry; -+ struct dxgadapter *adapter; -+ enum d3dddi_synchronizationobject_type type; -+ u32 monitored_fence:1; -+}; -+ -+struct dxgsharedsyncobject *dxgsharedsyncobj_create(struct dxgadapter *adapter, -+ struct dxgsyncobject -+ *syncobj); -+void dxgsharedsyncobj_release(struct kref *refcount); -+void dxgsharedsyncobj_add_syncobj(struct dxgsharedsyncobject *sharedsyncobj, -+ struct dxgsyncobject *syncobj); -+void dxgsharedsyncobj_remove_syncobj(struct dxgsharedsyncobject *sharedsyncobj, -+ struct dxgsyncobject *syncobj); -+ - struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, - struct dxgdevice *device, - struct dxgadapter *adapter, -@@ -375,6 +428,8 @@ struct dxgadapter { - struct list_head adapter_process_list_head; - /* List of all dxgsharedresource objects */ - struct list_head shared_resource_list_head; -+ /* List of all dxgsharedsyncobject objects */ -+ struct list_head adapter_shared_syncobj_list_head; - /* List of all non-device dxgsyncobject objects */ - struct list_head syncobj_list_head; - /* This lock protects shared resource and syncobject lists */ -@@ -402,6 +457,10 @@ void dxgadapter_release_lock_shared(struct dxgadapter *adapter); - int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter); - void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter); - void dxgadapter_release_lock_exclusive(struct 
dxgadapter *adapter); -+void dxgadapter_add_shared_syncobj(struct dxgadapter *adapter, -+ struct dxgsharedsyncobject *so); -+void dxgadapter_remove_shared_syncobj(struct dxgadapter *adapter, -+ struct dxgsharedsyncobject *so); - void dxgadapter_add_syncobj(struct dxgadapter *adapter, - struct dxgsyncobject *so); - void dxgadapter_remove_syncobj(struct dxgsyncobject *so); -@@ -487,8 +546,32 @@ struct dxgcontext *dxgcontext_create(struct dxgdevice *dev); - void dxgcontext_destroy(struct dxgprocess *pr, struct dxgcontext *ctx); - void dxgcontext_destroy_safe(struct dxgprocess *pr, struct dxgcontext *ctx); - void dxgcontext_release(struct kref *refcount); -+int dxgcontext_add_hwqueue(struct dxgcontext *ctx, -+ struct dxghwqueue *hq); -+void dxgcontext_remove_hwqueue(struct dxgcontext *ctx, struct dxghwqueue *hq); -+void dxgcontext_remove_hwqueue_safe(struct dxgcontext *ctx, -+ struct dxghwqueue *hq); - bool dxgcontext_is_active(struct dxgcontext *ctx); - -+/* -+ * The object represent the execution hardware queue of a device. -+ */ -+struct dxghwqueue { -+ /* entry in the context hw queue list */ -+ struct list_head hwqueue_list_entry; -+ struct kref hwqueue_kref; -+ struct dxgcontext *context; -+ struct dxgprocess *process; -+ struct d3dkmthandle progress_fence_sync_object; -+ struct d3dkmthandle handle; -+ struct d3dkmthandle device_handle; -+ void *progress_fence_mapped_address; -+}; -+ -+struct dxghwqueue *dxghwqueue_create(struct dxgcontext *ctx); -+void dxghwqueue_destroy(struct dxgprocess *pr, struct dxghwqueue *hq); -+void dxghwqueue_release(struct kref *refcount); -+ - /* - * A shared resource object is created to track the list of dxgresource objects, - * which are opened for the same underlying shared resource. 
-@@ -720,9 +803,22 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - d3dkmt_waitforsynchronizationobjectfromcpu - *args, - u64 cpu_event); -+int dxgvmb_send_create_hwqueue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_createhwqueue *args, -+ struct d3dkmt_createhwqueue *__user inargs, -+ struct dxghwqueue *hq); -+int dxgvmb_send_destroy_hwqueue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle handle); - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -+int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, -+ struct dxgvmbuschannel *channel, -+ struct d3dkmt_opensyncobjectfromnthandle2 -+ *args, -+ struct dxgsyncobject *syncobj); - int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process, - struct d3dkmthandle object, - struct d3dkmthandle *shared_handle); -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -259,6 +259,7 @@ int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, - - INIT_LIST_HEAD(&adapter->adapter_process_list_head); - INIT_LIST_HEAD(&adapter->shared_resource_list_head); -+ INIT_LIST_HEAD(&adapter->adapter_shared_syncobj_list_head); - INIT_LIST_HEAD(&adapter->syncobj_list_head); - init_rwsem(&adapter->shared_resource_list_lock); - adapter->pci_dev = dev; -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -277,6 +277,10 @@ struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process, - device_handle = - ((struct dxgcontext *)obj)->device_handle; - break; -+ case HMGRENTRY_TYPE_DXGHWQUEUE: -+ device_handle = -+ ((struct dxghwqueue 
*)obj)->device_handle; -+ break; - default: - DXG_ERR("invalid handle type: %d", t); - break; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -712,6 +712,69 @@ int dxgvmb_send_destroy_process(struct d3dkmthandle process) - return ret; - } - -+int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, -+ struct dxgvmbuschannel *channel, -+ struct d3dkmt_opensyncobjectfromnthandle2 -+ *args, -+ struct dxgsyncobject *syncobj) -+{ -+ struct dxgkvmb_command_opensyncobject *command; -+ struct dxgkvmb_command_opensyncobject_return result = { }; -+ int ret; -+ struct dxgvmbusmsg msg; -+ -+ ret = init_message(&msg, NULL, process, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vm_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_OPENSYNCOBJECT, -+ process->host_handle); -+ command->device = args->device; -+ command->global_sync_object = syncobj->shared_owner->host_shared_handle; -+ command->flags = args->flags; -+ if (syncobj->monitored_fence) -+ command->engine_affinity = -+ args->monitored_fence.engine_affinity; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = dxgvmb_send_sync_msg(channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ -+ dxgglobal_release_channel_lock(); -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result.status); -+ if (ret < 0) -+ goto cleanup; -+ -+ args->sync_object = result.sync_object; -+ if (syncobj->monitored_fence) { -+ void *va = dxg_map_iospace(result.guest_cpu_physical_address, -+ PAGE_SIZE, PROT_READ | PROT_WRITE, -+ true); -+ if (va == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ args->monitored_fence.fence_value_cpu_va = va; -+ args->monitored_fence.fence_value_gpu_va = -+ result.gpu_virtual_address; -+ syncobj->mapped_address = va; -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ 
if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process, - struct d3dkmthandle object, - struct d3dkmthandle *shared_handle) -@@ -2050,6 +2113,164 @@ int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_create_hwqueue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_createhwqueue *args, -+ struct d3dkmt_createhwqueue *__user inargs, -+ struct dxghwqueue *hwqueue) -+{ -+ struct dxgkvmb_command_createhwqueue *command = NULL; -+ u32 cmd_size = sizeof(struct dxgkvmb_command_createhwqueue); -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ if (args->priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("invalid private driver data size: %d", -+ args->priv_drv_data_size); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args->priv_drv_data_size) -+ cmd_size += args->priv_drv_data_size - 1; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CREATEHWQUEUE, -+ process->host_handle); -+ command->context = args->context; -+ command->flags = args->flags; -+ command->priv_drv_data_size = args->priv_drv_data_size; -+ if (args->priv_drv_data_size) { -+ ret = copy_from_user(command->priv_drv_data, -+ args->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy private data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ command, cmd_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(command->status); -+ if (ret < 0) { -+ DXG_ERR("dxgvmb_send_sync_msg failed: %x", -+ command->status.v); -+ goto cleanup; -+ } -+ -+ ret = hmgrtable_assign_handle_safe(&process->handle_table, hwqueue, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ command->hwqueue); -+ if (ret < 0) -+ goto cleanup; -+ -+ 
ret = hmgrtable_assign_handle_safe(&process->handle_table, -+ NULL, -+ HMGRENTRY_TYPE_MONITOREDFENCE, -+ command->hwqueue_progress_fence); -+ if (ret < 0) -+ goto cleanup; -+ -+ hwqueue->handle = command->hwqueue; -+ hwqueue->progress_fence_sync_object = command->hwqueue_progress_fence; -+ -+ hwqueue->progress_fence_mapped_address = -+ dxg_map_iospace((u64)command->hwqueue_progress_fence_cpuva, -+ PAGE_SIZE, PROT_READ | PROT_WRITE, true); -+ if (hwqueue->progress_fence_mapped_address == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ ret = copy_to_user(&inargs->queue, &command->hwqueue, -+ sizeof(struct d3dkmthandle)); -+ if (ret < 0) { -+ DXG_ERR("failed to copy hwqueue handle"); -+ goto cleanup; -+ } -+ ret = copy_to_user(&inargs->queue_progress_fence, -+ &command->hwqueue_progress_fence, -+ sizeof(struct d3dkmthandle)); -+ if (ret < 0) { -+ DXG_ERR("failed to progress fence"); -+ goto cleanup; -+ } -+ ret = copy_to_user(&inargs->queue_progress_fence_cpu_va, -+ &hwqueue->progress_fence_mapped_address, -+ sizeof(inargs->queue_progress_fence_cpu_va)); -+ if (ret < 0) { -+ DXG_ERR("failed to copy fence cpu va"); -+ goto cleanup; -+ } -+ ret = copy_to_user(&inargs->queue_progress_fence_gpu_va, -+ &command->hwqueue_progress_fence_gpuva, -+ sizeof(u64)); -+ if (ret < 0) { -+ DXG_ERR("failed to copy fence gpu va"); -+ goto cleanup; -+ } -+ if (args->priv_drv_data_size) { -+ ret = copy_to_user(args->priv_drv_data, -+ command->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret < 0) -+ DXG_ERR("failed to copy private data"); -+ } -+ -+cleanup: -+ if (ret < 0) { -+ DXG_ERR("failed %x", ret); -+ if (hwqueue->handle.v) { -+ hmgrtable_free_handle_safe(&process->handle_table, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ hwqueue->handle); -+ hwqueue->handle.v = 0; -+ } -+ if (command && command->hwqueue.v) -+ dxgvmb_send_destroy_hwqueue(process, adapter, -+ command->hwqueue); -+ } -+ free_message(&msg, process); -+ return ret; -+} -+ -+int dxgvmb_send_destroy_hwqueue(struct 
dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle handle) -+{ -+ int ret; -+ struct dxgkvmb_command_destroyhwqueue *command; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_DESTROYHWQUEUE, -+ process->host_handle); -+ command->hwqueue = handle; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -172,6 +172,21 @@ struct dxgkvmb_command_signalguestevent { - bool dereference_event; - }; - -+struct dxgkvmb_command_opensyncobject { -+ struct dxgkvmb_command_vm_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle global_sync_object; -+ u32 engine_affinity; -+ struct d3dddi_synchronizationobject_flags flags; -+}; -+ -+struct dxgkvmb_command_opensyncobject_return { -+ struct d3dkmthandle sync_object; -+ struct ntstatus status; -+ u64 gpu_virtual_address; -+ u64 guest_cpu_physical_address; -+}; -+ - /* - * The command returns struct d3dkmthandle of a shared object for the - * given pre-process object -@@ -508,4 +523,24 @@ struct dxgkvmb_command_waitforsyncobjectfromgpu { - /* struct d3dkmthandle ObjectHandles[object_count] */ - }; - -+/* Returns the same structure */ -+struct dxgkvmb_command_createhwqueue { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct ntstatus status; -+ struct d3dkmthandle hwqueue; -+ struct d3dkmthandle hwqueue_progress_fence; -+ void *hwqueue_progress_fence_cpuva; -+ 
u64 hwqueue_progress_fence_gpuva; -+ struct d3dkmthandle context; -+ struct d3dddi_createhwqueueflags flags; -+ u32 priv_drv_data_size; -+ char priv_drv_data[1]; -+}; -+ -+/* The command returns ntstatus */ -+struct dxgkvmb_command_destroyhwqueue { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle hwqueue; -+}; -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -36,6 +36,33 @@ static char *errorstr(int ret) - } - #endif - -+static int dxgsyncobj_release(struct inode *inode, struct file *file) -+{ -+ struct dxgsharedsyncobject *syncobj = file->private_data; -+ -+ DXG_TRACE("Release syncobj: %p", syncobj); -+ mutex_lock(&syncobj->fd_mutex); -+ kref_get(&syncobj->ssyncobj_kref); -+ syncobj->host_shared_handle_nt_reference--; -+ if (syncobj->host_shared_handle_nt_reference == 0) { -+ if (syncobj->host_shared_handle_nt.v) { -+ dxgvmb_send_destroy_nt_shared_object( -+ syncobj->host_shared_handle_nt); -+ DXG_TRACE("Syncobj host_handle_nt destroyed: %x", -+ syncobj->host_shared_handle_nt.v); -+ syncobj->host_shared_handle_nt.v = 0; -+ } -+ kref_put(&syncobj->ssyncobj_kref, dxgsharedsyncobj_release); -+ } -+ mutex_unlock(&syncobj->fd_mutex); -+ kref_put(&syncobj->ssyncobj_kref, dxgsharedsyncobj_release); -+ return 0; -+} -+ -+static const struct file_operations dxg_syncobj_fops = { -+ .release = dxgsyncobj_release, -+}; -+ - static int dxgsharedresource_release(struct inode *inode, struct file *file) - { - struct dxgsharedresource *resource = file->private_data; -@@ -833,6 +860,156 @@ dxgkio_destroy_context(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_create_hwqueue(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_createhwqueue args; -+ struct dxgdevice *device = NULL; -+ struct dxgcontext *context = NULL; -+ struct dxgadapter 
*adapter = NULL; -+ struct dxghwqueue *hwqueue = NULL; -+ int ret; -+ bool device_lock_acquired = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) -+ goto cleanup; -+ -+ device_lock_acquired = true; -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -+ context = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED); -+ -+ if (context == NULL) { -+ DXG_ERR("Invalid context handle %x", args.context.v); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hwqueue = dxghwqueue_create(context); -+ if (hwqueue == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_create_hwqueue(process, adapter, &args, -+ inargs, hwqueue); -+ -+cleanup: -+ -+ if (ret < 0 && hwqueue) -+ dxghwqueue_destroy(process, hwqueue); -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int dxgkio_destroy_hwqueue(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_destroyhwqueue args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxghwqueue 
*hwqueue = NULL; -+ struct d3dkmthandle device_handle = {}; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ hwqueue = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ args.queue); -+ if (hwqueue) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGHWQUEUE, args.queue); -+ hwqueue->handle.v = 0; -+ device_handle = hwqueue->device_handle; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (hwqueue == NULL) { -+ DXG_ERR("invalid hwqueue handle: %x", args.queue.v); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, device_handle); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_destroy_hwqueue(process, adapter, args.queue); -+ -+ dxghwqueue_destroy(process, hwqueue); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - get_standard_alloc_priv_data(struct dxgdevice *device, - struct d3dkmt_createstandardallocation *alloc_info, -@@ -1548,6 +1725,164 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_submit_signal_to_hwqueue(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_submitsignalsyncobjectstohwqueue args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ 
struct d3dkmthandle hwqueue = {}; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.hwqueue_count > D3DDDI_MAX_BROADCAST_CONTEXT || -+ args.hwqueue_count == 0) { -+ DXG_ERR("invalid hwqueue count: %d", -+ args.hwqueue_count); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.object_count > D3DDDI_MAX_OBJECT_SIGNALED || -+ args.object_count == 0) { -+ DXG_ERR("invalid number of syncobjects: %d", -+ args.object_count); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = copy_from_user(&hwqueue, args.hwqueues, -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy hwqueue handle"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ hwqueue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_signal_sync_object(process, adapter, -+ args.flags, 0, zerohandle, -+ args.object_count, args.objects, -+ args.hwqueue_count, args.hwqueues, -+ args.object_count, -+ args.fence_values, NULL, -+ zerohandle); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_submitwaitforsyncobjectstohwqueue args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int ret; -+ struct d3dkmthandle *objects = NULL; -+ u32 object_size; -+ u64 *fences = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; 
-+ } -+ -+ if (args.object_count > D3DDDI_MAX_OBJECT_WAITED_ON || -+ args.object_count == 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ object_size = sizeof(struct d3dkmthandle) * args.object_count; -+ objects = vzalloc(object_size); -+ if (objects == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(objects, args.objects, object_size); -+ if (ret) { -+ DXG_ERR("failed to copy objects"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ object_size = sizeof(u64) * args.object_count; -+ fences = vzalloc(object_size); -+ if (fences == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret = copy_from_user(fences, args.fence_values, object_size); -+ if (ret) { -+ DXG_ERR("failed to copy fence values"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ args.hwqueue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_wait_sync_object_gpu(process, adapter, -+ args.hwqueue, args.object_count, -+ objects, fences, false); -+ -+cleanup: -+ -+ if (objects) -+ vfree(objects); -+ if (fences) -+ vfree(fences); -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - { -@@ -1558,6 +1893,7 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - struct eventfd_ctx *event = NULL; - struct dxgsyncobject *syncobj = NULL; - bool device_lock_acquired = false; -+ struct dxgsharedsyncobject *syncobjgbl = NULL; - struct dxghosteventcpu *host_event = NULL; - - ret = copy_from_user(&args, inargs, sizeof(args)); -@@ -1618,6 +1954,22 @@ 
dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - if (ret < 0) - goto cleanup; - -+ if (args.info.flags.shared) { -+ if (args.info.shared_handle.v == 0) { -+ DXG_ERR("shared handle should not be 0"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ syncobjgbl = dxgsharedsyncobj_create(device->adapter, syncobj); -+ if (syncobjgbl == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ dxgsharedsyncobj_add_syncobj(syncobjgbl, syncobj); -+ -+ syncobjgbl->host_shared_handle = args.info.shared_handle; -+ } -+ - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy output args"); -@@ -1646,6 +1998,8 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - if (event) - eventfd_ctx_put(event); - } -+ if (syncobjgbl) -+ kref_put(&syncobjgbl->ssyncobj_kref, dxgsharedsyncobj_release); - if (adapter) - dxgadapter_release_lock_shared(adapter); - if (device_lock_acquired) -@@ -1700,6 +2054,140 @@ dxgkio_destroy_sync_object(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_open_sync_object_nt(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_opensyncobjectfromnthandle2 args; -+ struct dxgsyncobject *syncobj = NULL; -+ struct dxgsharedsyncobject *syncobj_fd = NULL; -+ struct file *file = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dddi_synchronizationobject_flags flags = { }; -+ int ret; -+ bool device_lock_acquired = false; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ args.sync_object.v = 0; -+ -+ if (args.device.v) { -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ return -EINVAL; -+ goto cleanup; -+ } -+ } else { -+ DXG_ERR("device handle is missing"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret 
= dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) -+ goto cleanup; -+ -+ device_lock_acquired = true; -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ file = fget(args.nt_handle); -+ if (!file) { -+ DXG_ERR("failed to get file from handle: %llx", -+ args.nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (file->f_op != &dxg_syncobj_fops) { -+ DXG_ERR("invalid fd: %llx", args.nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ syncobj_fd = file->private_data; -+ if (syncobj_fd == NULL) { -+ DXG_ERR("invalid private data: %llx", args.nt_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ flags.shared = 1; -+ flags.nt_security_sharing = 1; -+ syncobj = dxgsyncobject_create(process, device, adapter, -+ syncobj_fd->type, flags); -+ if (syncobj == NULL) { -+ DXG_ERR("failed to create sync object"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ dxgsharedsyncobj_add_syncobj(syncobj_fd, syncobj); -+ -+ ret = dxgvmb_send_open_sync_object_nt(process, &dxgglobal->channel, -+ &args, syncobj); -+ if (ret < 0) { -+ DXG_ERR("failed to open sync object on host: %x", -+ syncobj_fd->host_shared_handle.v); -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, syncobj, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ args.sync_object); -+ if (ret >= 0) { -+ syncobj->handle = args.sync_object; -+ kref_get(&syncobj->syncobj_kref); -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret == 0) -+ goto success; -+ DXG_ERR("failed to copy output args"); -+ -+cleanup: -+ -+ if (syncobj) { -+ dxgsyncobject_destroy(process, syncobj); -+ syncobj = NULL; -+ } -+ -+ if (args.sync_object.v) -+ dxgvmb_send_destroy_sync_object(process, args.sync_object); -+ -+success: -+ -+ if (file) -+ fput(file); 
-+ if (syncobj) -+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release); -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_signal_sync_object(struct dxgprocess *process, void *__user inargs) - { -@@ -2353,6 +2841,30 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgsharedsyncobj_get_host_nt_handle(struct dxgsharedsyncobject *syncobj, -+ struct dxgprocess *process, -+ struct d3dkmthandle objecthandle) -+{ -+ int ret = 0; -+ -+ mutex_lock(&syncobj->fd_mutex); -+ if (syncobj->host_shared_handle_nt_reference == 0) { -+ ret = dxgvmb_send_create_nt_shared_object(process, -+ objecthandle, -+ &syncobj->host_shared_handle_nt); -+ if (ret < 0) -+ goto cleanup; -+ DXG_TRACE("Host_shared_handle_ht: %x", -+ syncobj->host_shared_handle_nt.v); -+ kref_get(&syncobj->ssyncobj_kref); -+ } -+ syncobj->host_shared_handle_nt_reference++; -+cleanup: -+ mutex_unlock(&syncobj->fd_mutex); -+ return ret; -+} -+ - static int - dxgsharedresource_get_host_nt_handle(struct dxgsharedresource *resource, - struct dxgprocess *process, -@@ -2378,6 +2890,7 @@ dxgsharedresource_get_host_nt_handle(struct dxgsharedresource *resource, - } - - enum dxg_sharedobject_type { -+ DXG_SHARED_SYNCOBJECT, - DXG_SHARED_RESOURCE - }; - -@@ -2394,6 +2907,10 @@ static int get_object_fd(enum dxg_sharedobject_type type, - } - - switch (type) { -+ case DXG_SHARED_SYNCOBJECT: -+ file = anon_inode_getfile("dxgsyncobj", -+ &dxg_syncobj_fops, object, 0); -+ break; - case DXG_SHARED_RESOURCE: - file = anon_inode_getfile("dxgresource", - &dxg_resource_fops, object, 0); -@@ -2419,6 +2936,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - enum hmgrentry_type object_type; - struct 
dxgsyncobject *syncobj = NULL; - struct dxgresource *resource = NULL; -+ struct dxgsharedsyncobject *shared_syncobj = NULL; - struct dxgsharedresource *shared_resource = NULL; - struct d3dkmthandle *handles = NULL; - int object_fd = -1; -@@ -2465,6 +2983,17 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - ret = -EINVAL; - } else { - switch (object_type) { -+ case HMGRENTRY_TYPE_DXGSYNCOBJECT: -+ syncobj = obj; -+ if (syncobj->shared) { -+ kref_get(&syncobj->syncobj_kref); -+ shared_syncobj = syncobj->shared_owner; -+ } else { -+ DXG_ERR("sync object is not shared"); -+ syncobj = NULL; -+ ret = -EINVAL; -+ } -+ break; - case HMGRENTRY_TYPE_DXGRESOURCE: - resource = obj; - if (resource->shared_owner) { -@@ -2488,6 +3017,21 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - goto cleanup; - - switch (object_type) { -+ case HMGRENTRY_TYPE_DXGSYNCOBJECT: -+ ret = get_object_fd(DXG_SHARED_SYNCOBJECT, shared_syncobj, -+ &object_fd); -+ if (ret < 0) { -+ DXG_ERR("get_object_fd failed for sync object"); -+ goto cleanup; -+ } -+ ret = dxgsharedsyncobj_get_host_nt_handle(shared_syncobj, -+ process, -+ handles[0]); -+ if (ret < 0) { -+ DXG_ERR("get_host_nt_handle failed"); -+ goto cleanup; -+ } -+ break; - case HMGRENTRY_TYPE_DXGRESOURCE: - ret = get_object_fd(DXG_SHARED_RESOURCE, shared_resource, - &object_fd); -@@ -2954,10 +3498,10 @@ static struct ioctl_desc ioctls[] = { - /* 0x15 */ {dxgkio_close_adapter, LX_DXCLOSEADAPTER}, - /* 0x16 */ {}, - /* 0x17 */ {}, --/* 0x18 */ {}, -+/* 0x18 */ {dxgkio_create_hwqueue, LX_DXCREATEHWQUEUE}, - /* 0x19 */ {dxgkio_destroy_device, LX_DXDESTROYDEVICE}, - /* 0x1a */ {}, --/* 0x1b */ {}, -+/* 0x1b */ {dxgkio_destroy_hwqueue, LX_DXDESTROYHWQUEUE}, - /* 0x1c */ {}, - /* 0x1d */ {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT}, - /* 0x1e */ {}, -@@ -2986,8 +3530,10 @@ static struct ioctl_desc ioctls[] = { - /* 0x33 */ {dxgkio_signal_sync_object_gpu2, - 
LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2}, - /* 0x34 */ {}, --/* 0x35 */ {}, --/* 0x36 */ {}, -+/* 0x35 */ {dxgkio_submit_signal_to_hwqueue, -+ LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE}, -+/* 0x36 */ {dxgkio_submit_wait_to_hwqueue, -+ LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE}, - /* 0x37 */ {}, - /* 0x38 */ {}, - /* 0x39 */ {}, -@@ -2999,7 +3545,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x3d */ {}, - /* 0x3e */ {dxgkio_enum_adapters3, LX_DXENUMADAPTERS3}, - /* 0x3f */ {dxgkio_share_objects, LX_DXSHAREOBJECTS}, --/* 0x40 */ {}, -+/* 0x40 */ {dxgkio_open_sync_object_nt, LX_DXOPENSYNCOBJECTFROMNTHANDLE2}, - /* 0x41 */ {dxgkio_query_resource_info_nt, - LX_DXQUERYRESOURCEINFOFROMNTHANDLE}, - /* 0x42 */ {dxgkio_open_resource_nt, LX_DXOPENRESOURCEFROMNTHANDLE}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -201,6 +201,16 @@ struct d3dkmt_createcontextvirtual { - struct d3dkmthandle context; - }; - -+struct d3dddi_createhwqueueflags { -+ union { -+ struct { -+ __u32 disable_gpu_timeout:1; -+ __u32 reserved:31; -+ }; -+ __u32 value; -+ }; -+}; -+ - enum d3dkmdt_gdisurfacetype { - _D3DKMDT_GDISURFACE_INVALID = 0, - _D3DKMDT_GDISURFACE_TEXTURE = 1, -@@ -694,6 +704,81 @@ struct d3dddi_openallocationinfo2 { - __u64 reserved[6]; - }; - -+struct d3dkmt_createhwqueue { -+ struct d3dkmthandle context; -+ struct d3dddi_createhwqueueflags flags; -+ __u32 priv_drv_data_size; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ struct d3dkmthandle queue; -+ struct d3dkmthandle queue_progress_fence; -+#ifdef __KERNEL__ -+ void *queue_progress_fence_cpu_va; -+#else -+ __u64 queue_progress_fence_cpu_va; -+#endif -+ __u64 queue_progress_fence_gpu_va; -+}; -+ -+struct d3dkmt_destroyhwqueue { -+ struct d3dkmthandle queue; -+}; -+ -+struct d3dkmt_submitwaitforsyncobjectstohwqueue { -+ struct 
d3dkmthandle hwqueue; -+ __u32 object_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+ __u64 *fence_values; -+#else -+ __u64 objects; -+ __u64 fence_values; -+#endif -+}; -+ -+struct d3dkmt_submitsignalsyncobjectstohwqueue { -+ struct d3dddicb_signalflags flags; -+ __u32 hwqueue_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *hwqueues; -+#else -+ __u64 hwqueues; -+#endif -+ __u32 object_count; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *objects; -+ __u64 *fence_values; -+#else -+ __u64 objects; -+ __u64 fence_values; -+#endif -+}; -+ -+struct d3dkmt_opensyncobjectfromnthandle2 { -+ __u64 nt_handle; -+ struct d3dkmthandle device; -+ struct d3dddi_synchronizationobject_flags flags; -+ struct d3dkmthandle sync_object; -+ __u32 reserved1; -+ union { -+ struct { -+#ifdef __KERNEL__ -+ void *fence_value_cpu_va; -+#else -+ __u64 fence_value_cpu_va; -+#endif -+ __u64 fence_value_gpu_va; -+ __u32 engine_affinity; -+ } monitored_fence; -+ __u64 reserved[8]; -+ }; -+}; -+ - struct d3dkmt_openresourcefromnthandle { - struct d3dkmthandle device; - __u32 reserved; -@@ -819,6 +904,10 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x14, struct d3dkmt_enumadapters2) - #define LX_DXCLOSEADAPTER \ - _IOWR(0x47, 0x15, struct d3dkmt_closeadapter) -+#define LX_DXCREATEHWQUEUE \ -+ _IOWR(0x47, 0x18, struct d3dkmt_createhwqueue) -+#define LX_DXDESTROYHWQUEUE \ -+ _IOWR(0x47, 0x1b, struct d3dkmt_destroyhwqueue) - #define LX_DXDESTROYDEVICE \ - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ -@@ -829,6 +918,10 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x32, struct d3dkmt_signalsynchronizationobjectfromgpu) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2 \ - _IOWR(0x47, 0x33, struct d3dkmt_signalsynchronizationobjectfromgpu2) -+#define LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE \ -+ _IOWR(0x47, 0x35, struct d3dkmt_submitsignalsyncobjectstohwqueue) -+#define LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE \ -+ 
_IOWR(0x47, 0x36, struct d3dkmt_submitwaitforsyncobjectstohwqueue) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x3a, struct d3dkmt_waitforsynchronizationobjectfromcpu) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1680-drivers-hv-dxgkrnl-Creation-of-paging-queue-objects.patch b/patch/kernel/archive/wsl2-arm64-6.6/1680-drivers-hv-dxgkrnl-Creation-of-paging-queue-objects.patch deleted file mode 100644 index 6043c2319241..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1680-drivers-hv-dxgkrnl-Creation-of-paging-queue-objects.patch +++ /dev/null @@ -1,640 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Thu, 20 Jan 2022 15:15:18 -0800 -Subject: drivers: hv: dxgkrnl: Creation of paging queue objects. - -Implement ioctls for creation/destruction of the paging queue objects: - - LX_DXCREATEPAGINGQUEUE, - - LX_DXDESTROYPAGINGQUEUE - -Paging queue objects (dxgpagingqueue) contain operations, which -handle residency of device accessible allocations. An allocation is -resident, when the device has access to it. For example, the allocation -resides in local device memory or device page tables point to system -memory which is made non-pageable. - -Each paging queue has an associated monitored fence sync object, which -is used to detect when a paging operation is completed. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 89 +++++ - drivers/hv/dxgkrnl/dxgkrnl.h | 24 ++ - drivers/hv/dxgkrnl/dxgprocess.c | 4 + - drivers/hv/dxgkrnl/dxgvmbus.c | 74 ++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 17 + - drivers/hv/dxgkrnl/ioctl.c | 189 +++++++++- - include/uapi/misc/d3dkmthk.h | 27 ++ - 7 files changed, 418 insertions(+), 6 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -278,6 +278,7 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - void dxgdevice_stop(struct dxgdevice *device) - { - struct dxgallocation *alloc; -+ struct dxgpagingqueue *pqueue; - struct dxgsyncobject *syncobj; - - DXG_TRACE("Stopping device: %p", device); -@@ -288,6 +289,10 @@ void dxgdevice_stop(struct dxgdevice *device) - dxgdevice_release_alloc_list_lock(device); - - hmgrtable_lock(&device->process->handle_table, DXGLOCK_EXCL); -+ list_for_each_entry(pqueue, &device->pqueue_list_head, -+ pqueue_list_entry) { -+ dxgpagingqueue_stop(pqueue); -+ } - list_for_each_entry(syncobj, &device->syncobj_list_head, - syncobj_list_entry) { - dxgsyncobject_stop(syncobj); -@@ -375,6 +380,17 @@ void dxgdevice_destroy(struct dxgdevice *device) - dxgdevice_release_context_list_lock(device); - } - -+ { -+ struct dxgpagingqueue *tmp; -+ struct dxgpagingqueue *pqueue; -+ -+ DXG_TRACE("destroying paging queues"); -+ list_for_each_entry_safe(pqueue, tmp, &device->pqueue_list_head, -+ pqueue_list_entry) { -+ dxgpagingqueue_destroy(pqueue); -+ } -+ } -+ - /* Guest handles need to be released before the host handles */ - hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); - if (device->handle_valid) { -@@ -708,6 +724,26 @@ void dxgdevice_release(struct kref *refcount) - kfree(device); - } - -+void dxgdevice_add_paging_queue(struct 
dxgdevice *device, -+ struct dxgpagingqueue *entry) -+{ -+ dxgdevice_acquire_alloc_list_lock(device); -+ list_add_tail(&entry->pqueue_list_entry, &device->pqueue_list_head); -+ dxgdevice_release_alloc_list_lock(device); -+} -+ -+void dxgdevice_remove_paging_queue(struct dxgpagingqueue *pqueue) -+{ -+ struct dxgdevice *device = pqueue->device; -+ -+ dxgdevice_acquire_alloc_list_lock(device); -+ if (pqueue->pqueue_list_entry.next) { -+ list_del(&pqueue->pqueue_list_entry); -+ pqueue->pqueue_list_entry.next = NULL; -+ } -+ dxgdevice_release_alloc_list_lock(device); -+} -+ - void dxgdevice_add_syncobj(struct dxgdevice *device, - struct dxgsyncobject *syncobj) - { -@@ -899,6 +935,59 @@ else - kfree(alloc); - } - -+struct dxgpagingqueue *dxgpagingqueue_create(struct dxgdevice *device) -+{ -+ struct dxgpagingqueue *pqueue; -+ -+ pqueue = kzalloc(sizeof(*pqueue), GFP_KERNEL); -+ if (pqueue) { -+ pqueue->device = device; -+ pqueue->process = device->process; -+ pqueue->device_handle = device->handle; -+ dxgdevice_add_paging_queue(device, pqueue); -+ } -+ return pqueue; -+} -+ -+void dxgpagingqueue_stop(struct dxgpagingqueue *pqueue) -+{ -+ int ret; -+ -+ if (pqueue->mapped_address) { -+ ret = dxg_unmap_iospace(pqueue->mapped_address, PAGE_SIZE); -+ DXG_TRACE("fence is unmapped %d %p", -+ ret, pqueue->mapped_address); -+ pqueue->mapped_address = NULL; -+ } -+} -+ -+void dxgpagingqueue_destroy(struct dxgpagingqueue *pqueue) -+{ -+ struct dxgprocess *process = pqueue->process; -+ -+ DXG_TRACE("Destroying pqueue %p %x", pqueue, pqueue->handle.v); -+ -+ dxgpagingqueue_stop(pqueue); -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ if (pqueue->handle.v) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ pqueue->handle); -+ pqueue->handle.v = 0; -+ } -+ if (pqueue->syncobj_handle.v) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_MONITOREDFENCE, -+ pqueue->syncobj_handle); -+ pqueue->syncobj_handle.v = 0; -+ } 
-+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ if (pqueue->device) -+ dxgdevice_remove_paging_queue(pqueue); -+ kfree(pqueue); -+} -+ - struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - struct dxgadapter *adapter) - { -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -104,6 +104,16 @@ int dxgvmbuschannel_init(struct dxgvmbuschannel *ch, struct hv_device *hdev); - void dxgvmbuschannel_destroy(struct dxgvmbuschannel *ch); - void dxgvmbuschannel_receive(void *ctx); - -+struct dxgpagingqueue { -+ struct dxgdevice *device; -+ struct dxgprocess *process; -+ struct list_head pqueue_list_entry; -+ struct d3dkmthandle device_handle; -+ struct d3dkmthandle handle; -+ struct d3dkmthandle syncobj_handle; -+ void *mapped_address; -+}; -+ - /* - * The structure describes an event, which will be signaled by - * a message from host. -@@ -127,6 +137,10 @@ struct dxghosteventcpu { - bool remove_from_list; - }; - -+struct dxgpagingqueue *dxgpagingqueue_create(struct dxgdevice *device); -+void dxgpagingqueue_destroy(struct dxgpagingqueue *pqueue); -+void dxgpagingqueue_stop(struct dxgpagingqueue *pqueue); -+ - /* - * This is GPU synchronization object, which is used to synchronize execution - * between GPU contextx/hardware queues or for tracking GPU execution progress. 
-@@ -516,6 +530,9 @@ void dxgdevice_remove_alloc_safe(struct dxgdevice *dev, - struct dxgallocation *a); - void dxgdevice_add_resource(struct dxgdevice *dev, struct dxgresource *res); - void dxgdevice_remove_resource(struct dxgdevice *dev, struct dxgresource *res); -+void dxgdevice_add_paging_queue(struct dxgdevice *dev, -+ struct dxgpagingqueue *pqueue); -+void dxgdevice_remove_paging_queue(struct dxgpagingqueue *pqueue); - void dxgdevice_add_syncobj(struct dxgdevice *dev, struct dxgsyncobject *so); - void dxgdevice_remove_syncobj(struct dxgsyncobject *so); - bool dxgdevice_is_active(struct dxgdevice *dev); -@@ -762,6 +779,13 @@ dxgvmb_send_create_context(struct dxgadapter *adapter, - int dxgvmb_send_destroy_context(struct dxgadapter *adapter, - struct dxgprocess *process, - struct d3dkmthandle h); -+int dxgvmb_send_create_paging_queue(struct dxgprocess *pr, -+ struct dxgdevice *dev, -+ struct d3dkmt_createpagingqueue *args, -+ struct dxgpagingqueue *pq); -+int dxgvmb_send_destroy_paging_queue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle h); - int dxgvmb_send_create_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - struct d3dkmt_createallocation *args, - struct d3dkmt_createallocation *__user inargs, -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -277,6 +277,10 @@ struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process, - device_handle = - ((struct dxgcontext *)obj)->device_handle; - break; -+ case HMGRENTRY_TYPE_DXGPAGINGQUEUE: -+ device_handle = -+ ((struct dxgpagingqueue *)obj)->device_handle; -+ break; - case HMGRENTRY_TYPE_DXGHWQUEUE: - device_handle = - ((struct dxghwqueue *)obj)->device_handle; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- 
a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1155,6 +1155,80 @@ int dxgvmb_send_destroy_context(struct dxgadapter *adapter, - return ret; - } - -+int dxgvmb_send_create_paging_queue(struct dxgprocess *process, -+ struct dxgdevice *device, -+ struct d3dkmt_createpagingqueue *args, -+ struct dxgpagingqueue *pqueue) -+{ -+ struct dxgkvmb_command_createpagingqueue_return result; -+ struct dxgkvmb_command_createpagingqueue *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, device->adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CREATEPAGINGQUEUE, -+ process->host_handle); -+ command->args = *args; -+ args->paging_queue.v = 0; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result, -+ sizeof(result)); -+ if (ret < 0) { -+ DXG_ERR("send_create_paging_queue failed %x", ret); -+ goto cleanup; -+ } -+ -+ args->paging_queue = result.paging_queue; -+ args->sync_object = result.sync_object; -+ args->fence_cpu_virtual_address = -+ dxg_map_iospace(result.fence_storage_physical_address, PAGE_SIZE, -+ PROT_READ | PROT_WRITE, true); -+ if (args->fence_cpu_virtual_address == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ pqueue->mapped_address = args->fence_cpu_virtual_address; -+ pqueue->handle = args->paging_queue; -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_destroy_paging_queue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle h) -+{ -+ int ret; -+ struct dxgkvmb_command_destroypagingqueue *command; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_DESTROYPAGINGQUEUE, -+ 
process->host_handle); -+ command->paging_queue = h; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, NULL, 0); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - static int - copy_private_data(struct d3dkmt_createallocation *args, - struct dxgkvmb_command_createallocation *command, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -462,6 +462,23 @@ struct dxgkvmb_command_destroycontext { - struct d3dkmthandle context; - }; - -+struct dxgkvmb_command_createpagingqueue { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_createpagingqueue args; -+}; -+ -+struct dxgkvmb_command_createpagingqueue_return { -+ struct d3dkmthandle paging_queue; -+ struct d3dkmthandle sync_object; -+ u64 fence_storage_physical_address; -+ u64 fence_storage_offset; -+}; -+ -+struct dxgkvmb_command_destroypagingqueue { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle paging_queue; -+}; -+ - struct dxgkvmb_command_createsyncobject { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_createsynchronizationobject2 args; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -329,7 +329,7 @@ static int dxgsharedresource_seal(struct dxgsharedresource *shared_resource) - - if (alloc_data_size) { - if (data_size < alloc_data_size) { -- dev_err(DXGDEV, -+ DXG_ERR( - "Invalid private data size"); - ret = -EINVAL; - goto cleanup1; -@@ -1010,6 +1010,183 @@ static int dxgkio_destroy_hwqueue(struct dxgprocess *process, - return ret; - } - -+static int -+dxgkio_create_paging_queue(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_createpagingqueue args; -+ struct dxgdevice *device = NULL; -+ struct 
dxgadapter *adapter = NULL; -+ struct dxgpagingqueue *pqueue = NULL; -+ int ret; -+ struct d3dkmthandle host_handle = {}; -+ bool device_lock_acquired = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) -+ goto cleanup; -+ -+ device_lock_acquired = true; -+ adapter = device->adapter; -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ pqueue = dxgpagingqueue_create(device); -+ if (pqueue == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_create_paging_queue(process, device, &args, pqueue); -+ if (ret >= 0) { -+ host_handle = args.paging_queue; -+ -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, pqueue, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ host_handle); -+ if (ret >= 0) { -+ pqueue->handle = host_handle; -+ ret = hmgrtable_assign_handle(&process->handle_table, -+ NULL, -+ HMGRENTRY_TYPE_MONITOREDFENCE, -+ args.sync_object); -+ if (ret >= 0) -+ pqueue->syncobj_handle = args.sync_object; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ /* should not fail after this */ -+ } -+ -+cleanup: -+ -+ if (ret < 0) { -+ if (pqueue) -+ dxgpagingqueue_destroy(pqueue); -+ if (host_handle.v) -+ dxgvmb_send_destroy_paging_queue(process, -+ adapter, -+ host_handle); -+ } -+ -+ if (adapter) -+ 
dxgadapter_release_lock_shared(adapter); -+ -+ if (device) { -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_destroy_paging_queue(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dddi_destroypagingqueue args; -+ struct dxgpagingqueue *paging_queue = NULL; -+ int ret; -+ struct d3dkmthandle device_handle = {}; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ paging_queue = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.paging_queue); -+ if (paging_queue) { -+ device_handle = paging_queue->device_handle; -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.paging_queue); -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_MONITOREDFENCE, -+ paging_queue->syncobj_handle); -+ paging_queue->syncobj_handle.v = 0; -+ paging_queue->handle.v = 0; -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. 
-+ */ -+ if (device_handle.v) -+ device = dxgprocess_device_by_handle(process, device_handle); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_destroy_paging_queue(process, adapter, -+ args.paging_queue); -+ -+ dxgpagingqueue_destroy(paging_queue); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) { -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - get_standard_alloc_priv_data(struct dxgdevice *device, - struct d3dkmt_createstandardallocation *alloc_info, -@@ -1272,7 +1449,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - args.private_runtime_resource_handle; - if (args.flags.create_shared) { - if (!args.flags.nt_security_sharing) { -- dev_err(DXGDEV, -+ DXG_ERR( - "nt_security_sharing must be set"); - ret = -EINVAL; - goto cleanup; -@@ -1313,7 +1490,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - args.private_runtime_data, - args.private_runtime_data_size); - if (ret) { -- dev_err(DXGDEV, -+ DXG_ERR( - "failed to copy runtime data"); - ret = -EINVAL; - goto cleanup; -@@ -1333,7 +1510,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - args.priv_drv_data, - args.priv_drv_data_size); - if (ret) { -- dev_err(DXGDEV, -+ DXG_ERR( - "failed to copy res data"); - ret = -EINVAL; - goto cleanup; -@@ -3481,7 +3658,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x04 */ {dxgkio_create_context_virtual, LX_DXCREATECONTEXTVIRTUAL}, - /* 0x05 */ 
{dxgkio_destroy_context, LX_DXDESTROYCONTEXT}, - /* 0x06 */ {dxgkio_create_allocation, LX_DXCREATEALLOCATION}, --/* 0x07 */ {}, -+/* 0x07 */ {dxgkio_create_paging_queue, LX_DXCREATEPAGINGQUEUE}, - /* 0x08 */ {}, - /* 0x09 */ {dxgkio_query_adapter_info, LX_DXQUERYADAPTERINFO}, - /* 0x0a */ {}, -@@ -3502,7 +3679,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x19 */ {dxgkio_destroy_device, LX_DXDESTROYDEVICE}, - /* 0x1a */ {}, - /* 0x1b */ {dxgkio_destroy_hwqueue, LX_DXDESTROYHWQUEUE}, --/* 0x1c */ {}, -+/* 0x1c */ {dxgkio_destroy_paging_queue, LX_DXDESTROYPAGINGQUEUE}, - /* 0x1d */ {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT}, - /* 0x1e */ {}, - /* 0x1f */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -211,6 +211,29 @@ struct d3dddi_createhwqueueflags { - }; - }; - -+enum d3dddi_pagingqueue_priority { -+ _D3DDDI_PAGINGQUEUE_PRIORITY_BELOW_NORMAL = -1, -+ _D3DDDI_PAGINGQUEUE_PRIORITY_NORMAL = 0, -+ _D3DDDI_PAGINGQUEUE_PRIORITY_ABOVE_NORMAL = 1, -+}; -+ -+struct d3dkmt_createpagingqueue { -+ struct d3dkmthandle device; -+ enum d3dddi_pagingqueue_priority priority; -+ struct d3dkmthandle paging_queue; -+ struct d3dkmthandle sync_object; -+#ifdef __KERNEL__ -+ void *fence_cpu_virtual_address; -+#else -+ __u64 fence_cpu_virtual_address; -+#endif -+ __u32 physical_adapter_index; -+}; -+ -+struct d3dddi_destroypagingqueue { -+ struct d3dkmthandle paging_queue; -+}; -+ - enum d3dkmdt_gdisurfacetype { - _D3DKMDT_GDISURFACE_INVALID = 0, - _D3DKMDT_GDISURFACE_TEXTURE = 1, -@@ -890,6 +913,8 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x05, struct d3dkmt_destroycontext) - #define LX_DXCREATEALLOCATION \ - _IOWR(0x47, 0x06, struct d3dkmt_createallocation) -+#define LX_DXCREATEPAGINGQUEUE \ -+ _IOWR(0x47, 0x07, struct d3dkmt_createpagingqueue) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct 
d3dkmt_queryadapterinfo) - #define LX_DXCREATESYNCHRONIZATIONOBJECT \ -@@ -908,6 +933,8 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x18, struct d3dkmt_createhwqueue) - #define LX_DXDESTROYHWQUEUE \ - _IOWR(0x47, 0x1b, struct d3dkmt_destroyhwqueue) -+#define LX_DXDESTROYPAGINGQUEUE \ -+ _IOWR(0x47, 0x1c, struct d3dddi_destroypagingqueue) - #define LX_DXDESTROYDEVICE \ - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1681-drivers-hv-dxgkrnl-Submit-execution-commands-to-the-compute-device.patch b/patch/kernel/archive/wsl2-arm64-6.6/1681-drivers-hv-dxgkrnl-Submit-execution-commands-to-the-compute-device.patch deleted file mode 100644 index a2bc7bd7880a..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1681-drivers-hv-dxgkrnl-Submit-execution-commands-to-the-compute-device.patch +++ /dev/null @@ -1,450 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 19 Jan 2022 18:02:09 -0800 -Subject: drivers: hv: dxgkrnl: Submit execution commands to the compute device - -Implements ioctls for submission of compute device buffers for execution: - - LX_DXSUBMITCOMMAND - The ioctl is used to submit a command buffer to the device, - working in the "packet scheduling" mode. - - - LX_DXSUBMITCOMMANDTOHWQUEUE - The ioctl is used to submit a command buffer to the device, - working in the "hardware scheduling" mode. - -To improve performance both ioctls use asynchronous VM bus messages -to communicate with the host as these are high frequency operations. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 6 + - drivers/hv/dxgkrnl/dxgvmbus.c | 113 +++++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 14 + - drivers/hv/dxgkrnl/ioctl.c | 127 +++++++++- - include/uapi/misc/d3dkmthk.h | 58 +++++ - 5 files changed, 316 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -796,6 +796,9 @@ int dxgvmb_send_create_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - int dxgvmb_send_destroy_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - struct d3dkmt_destroyallocation2 *args, - struct d3dkmthandle *alloc_handles); -+int dxgvmb_send_submit_command(struct dxgprocess *pr, -+ struct dxgadapter *adapter, -+ struct d3dkmt_submitcommand *args); - int dxgvmb_send_create_sync_object(struct dxgprocess *pr, - struct dxgadapter *adapter, - struct d3dkmt_createsynchronizationobject2 -@@ -838,6 +841,9 @@ int dxgvmb_send_destroy_hwqueue(struct dxgprocess *process, - int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryadapterinfo *args); -+int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_submitcommandtohwqueue *a); - int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, - struct dxgvmbuschannel *channel, - struct d3dkmt_opensyncobjectfromnthandle2 -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1901,6 +1901,61 @@ int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - return ret; - } - -+int dxgvmb_send_submit_command(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct 
d3dkmt_submitcommand *args) -+{ -+ int ret; -+ u32 cmd_size; -+ struct dxgkvmb_command_submitcommand *command; -+ u32 hbufsize = args->num_history_buffers * sizeof(struct d3dkmthandle); -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ cmd_size = sizeof(struct dxgkvmb_command_submitcommand) + -+ hbufsize + args->priv_drv_data_size; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ ret = copy_from_user(&command[1], args->history_buffer_array, -+ hbufsize); -+ if (ret) { -+ DXG_ERR(" failed to copy history buffer"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_from_user((u8 *) &command[1] + hbufsize, -+ args->priv_drv_data, args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy history priv data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_SUBMITCOMMAND, -+ process->host_handle); -+ command->args = *args; -+ -+ if (dxgglobal->async_msg_enabled) { -+ command->hdr.async_msg = 1; -+ ret = dxgvmb_send_async_msg(msg.channel, msg.hdr, msg.size); -+ } else { -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, -+ msg.size); -+ } -+ -+cleanup: -+ -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - static void set_result(struct d3dkmt_createsynchronizationobject2 *args, - u64 fence_gpu_va, u8 *va) - { -@@ -2427,3 +2482,61 @@ int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - DXG_TRACE("err: %d", ret); - return ret; - } -+ -+int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_submitcommandtohwqueue -+ *args) -+{ -+ int ret = -EINVAL; -+ u32 cmd_size; -+ struct dxgkvmb_command_submitcommandtohwqueue *command; -+ u32 primaries_size = args->num_primaries * sizeof(struct d3dkmthandle); -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ struct dxgglobal 
*dxgglobal = dxggbl(); -+ -+ cmd_size = sizeof(*command) + args->priv_drv_data_size + primaries_size; -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ if (primaries_size) { -+ ret = copy_from_user(&command[1], args->written_primaries, -+ primaries_size); -+ if (ret) { -+ DXG_ERR("failed to copy primaries handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ if (args->priv_drv_data_size) { -+ ret = copy_from_user((char *)&command[1] + primaries_size, -+ args->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy primaries data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_SUBMITCOMMANDTOHWQUEUE, -+ process->host_handle); -+ command->args = *args; -+ -+ if (dxgglobal->async_msg_enabled) { -+ command->hdr.async_msg = 1; -+ ret = dxgvmb_send_async_msg(msg.channel, msg.hdr, msg.size); -+ } else { -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, -+ msg.size); -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -314,6 +314,20 @@ struct dxgkvmb_command_flushdevice { - enum dxgdevice_flushschedulerreason reason; - }; - -+struct dxgkvmb_command_submitcommand { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_submitcommand args; -+ /* HistoryBufferHandles */ -+ /* PrivateDriverData */ -+}; -+ -+struct dxgkvmb_command_submitcommandtohwqueue { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_submitcommandtohwqueue args; -+ /* Written primaries */ -+ /* PrivateDriverData */ -+}; -+ - struct dxgkvmb_command_createallocation_allocinfo { - u32 flags; - u32 priv_drv_data_size; -diff --git a/drivers/hv/dxgkrnl/ioctl.c 
b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -1902,6 +1902,129 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_submit_command(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_submitcommand args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.broadcast_context_count > D3DDDI_MAX_BROADCAST_CONTEXT || -+ args.broadcast_context_count == 0) { -+ DXG_ERR("invalid number of contexts"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("invalid private data size"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.num_history_buffers > 1024) { -+ DXG_ERR("invalid number of history buffers"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.num_primaries > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("invalid number of primaries"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.broadcast_context[0]); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_submit_command(process, adapter, &args); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_submit_command_to_hwqueue(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct 
d3dkmt_submitcommandtohwqueue args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("invalid private data size"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.num_primaries > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ DXG_ERR("invalid number of primaries"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ args.hwqueue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_submit_command_hwqueue(process, adapter, &args); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_submit_signal_to_hwqueue(struct dxgprocess *process, void *__user inargs) - { -@@ -3666,7 +3789,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x0c */ {}, - /* 0x0d */ {}, - /* 0x0e */ {}, --/* 0x0f */ {}, -+/* 0x0f */ {dxgkio_submit_command, LX_DXSUBMITCOMMAND}, - /* 0x10 */ {dxgkio_create_sync_object, LX_DXCREATESYNCHRONIZATIONOBJECT}, - /* 0x11 */ {dxgkio_signal_sync_object, LX_DXSIGNALSYNCHRONIZATIONOBJECT}, - /* 0x12 */ {dxgkio_wait_sync_object, LX_DXWAITFORSYNCHRONIZATIONOBJECT}, -@@ -3706,7 +3829,7 @@ static struct ioctl_desc ioctls[] = { - LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU}, - /* 0x33 */ {dxgkio_signal_sync_object_gpu2, - LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2}, --/* 0x34 */ {}, -+/* 0x34 */ {dxgkio_submit_command_to_hwqueue, LX_DXSUBMITCOMMANDTOHWQUEUE}, - /* 0x35 */ 
{dxgkio_submit_signal_to_hwqueue, - LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE}, - /* 0x36 */ {dxgkio_submit_wait_to_hwqueue, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -58,6 +58,8 @@ struct winluid { - __u32 b; - }; - -+#define D3DDDI_MAX_WRITTEN_PRIMARIES 16 -+ - #define D3DKMT_CREATEALLOCATION_MAX 1024 - #define D3DKMT_ADAPTERS_MAX 64 - #define D3DDDI_MAX_BROADCAST_CONTEXT 64 -@@ -525,6 +527,58 @@ struct d3dkmt_destroysynchronizationobject { - struct d3dkmthandle sync_object; - }; - -+struct d3dkmt_submitcommandflags { -+ __u32 null_rendering:1; -+ __u32 present_redirected:1; -+ __u32 reserved:30; -+}; -+ -+struct d3dkmt_submitcommand { -+ __u64 command_buffer; -+ __u32 command_length; -+ struct d3dkmt_submitcommandflags flags; -+ __u64 present_history_token; -+ __u32 broadcast_context_count; -+ struct d3dkmthandle broadcast_context[D3DDDI_MAX_BROADCAST_CONTEXT]; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ __u32 priv_drv_data_size; -+ __u32 num_primaries; -+ struct d3dkmthandle written_primaries[D3DDDI_MAX_WRITTEN_PRIMARIES]; -+ __u32 num_history_buffers; -+ __u32 reserved1; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *history_buffer_array; -+#else -+ __u64 history_buffer_array; -+#endif -+}; -+ -+struct d3dkmt_submitcommandtohwqueue { -+ struct d3dkmthandle hwqueue; -+ __u32 reserved; -+ __u64 hwqueue_progress_fence_id; -+ __u64 command_buffer; -+ __u32 command_length; -+ __u32 priv_drv_data_size; -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ __u32 num_primaries; -+ __u32 reserved1; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *written_primaries; -+#else -+ __u64 written_primaries; -+#endif -+}; -+ - enum d3dkmt_standardallocationtype { - _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP = 1, - 
_D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER = 2, -@@ -917,6 +971,8 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x07, struct d3dkmt_createpagingqueue) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) -+#define LX_DXSUBMITCOMMAND \ -+ _IOWR(0x47, 0x0f, struct d3dkmt_submitcommand) - #define LX_DXCREATESYNCHRONIZATIONOBJECT \ - _IOWR(0x47, 0x10, struct d3dkmt_createsynchronizationobject2) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECT \ -@@ -945,6 +1001,8 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x32, struct d3dkmt_signalsynchronizationobjectfromgpu) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2 \ - _IOWR(0x47, 0x33, struct d3dkmt_signalsynchronizationobjectfromgpu2) -+#define LX_DXSUBMITCOMMANDTOHWQUEUE \ -+ _IOWR(0x47, 0x34, struct d3dkmt_submitcommandtohwqueue) - #define LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE \ - _IOWR(0x47, 0x35, struct d3dkmt_submitsignalsyncobjectstohwqueue) - #define LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1682-drivers-hv-dxgkrnl-Share-objects-with-the-host.patch b/patch/kernel/archive/wsl2-arm64-6.6/1682-drivers-hv-dxgkrnl-Share-objects-with-the-host.patch deleted file mode 100644 index 42736ff7800f..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1682-drivers-hv-dxgkrnl-Share-objects-with-the-host.patch +++ /dev/null @@ -1,271 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Sat, 7 Aug 2021 18:11:34 -0700 -Subject: drivers: hv: dxgkrnl: Share objects with the host - -Implement the LX_DXSHAREOBJECTWITHHOST ioctl. -This ioctl is used to create a Windows NT handle on the host -for the given shared object (resource or sync object). The NT -handle is returned to the caller. The caller could share the NT -handle with a host application, which needs to access the object. -The host application can open the shared resource using the NT -handle. 
This way the guest and the host have access to the same -object. - -Fix incorrect handling of error results from copy_from_user(). - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 2 + - drivers/hv/dxgkrnl/dxgvmbus.c | 60 +++++++++- - drivers/hv/dxgkrnl/dxgvmbus.h | 18 +++ - drivers/hv/dxgkrnl/ioctl.c | 38 +++++- - include/uapi/misc/d3dkmthk.h | 9 ++ - 5 files changed, 120 insertions(+), 7 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -872,6 +872,8 @@ int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - int dxgvmb_send_async_msg(struct dxgvmbuschannel *channel, - void *command, - u32 cmd_size); -+int dxgvmb_send_share_object_with_host(struct dxgprocess *process, -+ struct d3dkmt_shareobjectwithhost *args); - - void signal_host_cpu_event(struct dxghostevent *eventhdr); - int ntstatus2int(struct ntstatus status); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -881,6 +881,50 @@ int dxgvmb_send_destroy_sync_object(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_share_object_with_host(struct dxgprocess *process, -+ struct d3dkmt_shareobjectwithhost *args) -+{ -+ struct dxgkvmb_command_shareobjectwithhost *command; -+ struct dxgkvmb_command_shareobjectwithhost_return result = {}; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, NULL, process, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ command_vm_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_SHAREOBJECTWITHHOST, -+ process->host_handle); -+ 
command->device_handle = args->device_handle; -+ command->object_handle = args->object_handle; -+ -+ ret = dxgvmb_send_sync_msg(dxgglobal_get_dxgvmbuschannel(), -+ msg.hdr, msg.size, &result, sizeof(result)); -+ -+ dxgglobal_release_channel_lock(); -+ -+ if (ret || !NT_SUCCESS(result.status)) { -+ if (ret == 0) -+ ret = ntstatus2int(result.status); -+ DXG_ERR("Host failed to share object with host: %d %x", -+ ret, result.status.v); -+ goto cleanup; -+ } -+ args->object_vail_nt_handle = result.vail_nt_handle; -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_ERR("err: %d", ret); -+ return ret; -+} -+ - /* - * Virtual GPU messages to the host - */ -@@ -2323,37 +2367,43 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - - ret = copy_to_user(&inargs->queue, &command->hwqueue, - sizeof(struct d3dkmthandle)); -- if (ret < 0) { -+ if (ret) { - DXG_ERR("failed to copy hwqueue handle"); -+ ret = -EINVAL; - goto cleanup; - } - ret = copy_to_user(&inargs->queue_progress_fence, - &command->hwqueue_progress_fence, - sizeof(struct d3dkmthandle)); -- if (ret < 0) { -+ if (ret) { - DXG_ERR("failed to progress fence"); -+ ret = -EINVAL; - goto cleanup; - } - ret = copy_to_user(&inargs->queue_progress_fence_cpu_va, - &hwqueue->progress_fence_mapped_address, - sizeof(inargs->queue_progress_fence_cpu_va)); -- if (ret < 0) { -+ if (ret) { - DXG_ERR("failed to copy fence cpu va"); -+ ret = -EINVAL; - goto cleanup; - } - ret = copy_to_user(&inargs->queue_progress_fence_gpu_va, - &command->hwqueue_progress_fence_gpuva, - sizeof(u64)); -- if (ret < 0) { -+ if (ret) { - DXG_ERR("failed to copy fence gpu va"); -+ ret = -EINVAL; - goto cleanup; - } - if (args->priv_drv_data_size) { - ret = copy_to_user(args->priv_drv_data, - command->priv_drv_data, - args->priv_drv_data_size); -- if (ret < 0) -+ if (ret) { - DXG_ERR("failed to copy private data"); -+ ret = -EINVAL; -+ } - } - - cleanup: -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h 
-index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -574,4 +574,22 @@ struct dxgkvmb_command_destroyhwqueue { - struct d3dkmthandle hwqueue; - }; - -+struct dxgkvmb_command_shareobjectwithhost { -+ struct dxgkvmb_command_vm_to_host hdr; -+ struct d3dkmthandle device_handle; -+ struct d3dkmthandle object_handle; -+ u64 reserved; -+}; -+ -+struct dxgkvmb_command_shareobjectwithhost_return { -+ struct ntstatus status; -+ u32 alignment; -+ u64 vail_nt_handle; -+}; -+ -+int -+dxgvmb_send_sync_msg(struct dxgvmbuschannel *channel, -+ void *command, u32 command_size, void *result, -+ u32 result_size); -+ - #endif /* _DXGVMBUS_H */ -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -2460,6 +2460,7 @@ dxgkio_open_sync_object_nt(struct dxgprocess *process, void *__user inargs) - if (ret == 0) - goto success; - DXG_ERR("failed to copy output args"); -+ ret = -EINVAL; - - cleanup: - -@@ -3364,8 +3365,10 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - tmp = (u64) object_fd; - - ret = copy_to_user(args.shared_handle, &tmp, sizeof(u64)); -- if (ret < 0) -+ if (ret) { - DXG_ERR("failed to copy shared handle"); -+ ret = -EINVAL; -+ } - - cleanup: - if (ret < 0) { -@@ -3773,6 +3776,37 @@ dxgkio_open_resource_nt(struct dxgprocess *process, - return ret; - } - -+static int -+dxgkio_share_object_with_host(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_shareobjectwithhost args; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_share_object_with_host(process, &args); -+ if (ret) { -+ DXG_ERR("dxgvmb_send_share_object_with_host dailed"); -+ goto cleanup; -+ } -+ -+ ret = copy_to_user(inargs, &args, 
sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy data to user"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static struct ioctl_desc ioctls[] = { - /* 0x00 */ {}, - /* 0x01 */ {dxgkio_open_adapter_from_luid, LX_DXOPENADAPTERFROMLUID}, -@@ -3850,7 +3884,7 @@ static struct ioctl_desc ioctls[] = { - LX_DXQUERYRESOURCEINFOFROMNTHANDLE}, - /* 0x42 */ {dxgkio_open_resource_nt, LX_DXOPENRESOURCEFROMNTHANDLE}, - /* 0x43 */ {}, --/* 0x44 */ {}, -+/* 0x44 */ {dxgkio_share_object_with_host, LX_DXSHAREOBJECTWITHHOST}, - /* 0x45 */ {}, - }; - -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -952,6 +952,13 @@ struct d3dkmt_enumadapters3 { - #endif - }; - -+struct d3dkmt_shareobjectwithhost { -+ struct d3dkmthandle device_handle; -+ struct d3dkmthandle object_handle; -+ __u64 reserved; -+ __u64 object_vail_nt_handle; -+}; -+ - /* - * Dxgkrnl Graphics Port Driver ioctl definitions - * -@@ -1021,5 +1028,7 @@ struct d3dkmt_enumadapters3 { - _IOWR(0x47, 0x41, struct d3dkmt_queryresourceinfofromnthandle) - #define LX_DXOPENRESOURCEFROMNTHANDLE \ - _IOWR(0x47, 0x42, struct d3dkmt_openresourcefromnthandle) -+#define LX_DXSHAREOBJECTWITHHOST \ -+ _IOWR(0x47, 0x44, struct d3dkmt_shareobjectwithhost) - - #endif /* _D3DKMTHK_H */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1683-drivers-hv-dxgkrnl-Query-the-dxgdevice-state.patch b/patch/kernel/archive/wsl2-arm64-6.6/1683-drivers-hv-dxgkrnl-Query-the-dxgdevice-state.patch deleted file mode 100644 index 1b63bcdf315b..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1683-drivers-hv-dxgkrnl-Query-the-dxgdevice-state.patch +++ /dev/null @@ -1,454 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 19 Jan 2022 16:53:47 -0800 -Subject: drivers: 
hv: dxgkrnl: Query the dxgdevice state - -Implement the ioctl to query the dxgdevice state - LX_DXGETDEVICESTATE. -The IOCTL is used to query the state of the given dxgdevice object (active, -error, etc.). - -A call to the dxgdevice execution state could be high frequency. -The following method is used to avoid sending a synchronous VM -bus message to the host for every call: -- When a dxgdevice is created, a pointer to dxgglobal->device_state_counter - is sent to the host -- Every time the device state on the host is changed, the host will send - an asynchronous message to the guest (DXGK_VMBCOMMAND_SETGUESTDATA) and - the guest will increment the device_state_counter value. -- the dxgdevice object has execution_state_counter member, which is equal - to dxgglobal->device_state_counter value at the time when - LX_DXGETDEVICESTATE was last processed.. -- if execution_state_counter is different from device_state_counter, the - dxgk_vmbcommand_getdevicestate VM bus message is sent to the host. - Otherwise, the cached value is returned to the caller. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 11 + - drivers/hv/dxgkrnl/dxgmodule.c | 1 - - drivers/hv/dxgkrnl/dxgvmbus.c | 68 +++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 26 +++ - drivers/hv/dxgkrnl/ioctl.c | 66 +++++- - include/uapi/misc/d3dkmthk.h | 101 +++++++++- - 6 files changed, 261 insertions(+), 12 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -268,12 +268,18 @@ void dxgsyncobject_destroy(struct dxgprocess *process, - void dxgsyncobject_stop(struct dxgsyncobject *syncobj); - void dxgsyncobject_release(struct kref *refcount); - -+/* -+ * device_state_counter - incremented every time the execition state of -+ * a DXGDEVICE is changed in the host. 
Used to optimize access to the -+ * device execution state. -+ */ - struct dxgglobal { - struct dxgdriver *drvdata; - struct dxgvmbuschannel channel; - struct hv_device *hdev; - u32 num_adapters; - u32 vmbus_ver; /* Interface version */ -+ atomic_t device_state_counter; - struct resource *mem; - u64 mmiospace_base; - u64 mmiospace_size; -@@ -512,6 +518,7 @@ struct dxgdevice { - struct list_head syncobj_list_head; - struct d3dkmthandle handle; - enum d3dkmt_deviceexecution_state execution_state; -+ int execution_state_counter; - u32 handle_valid; - }; - -@@ -849,6 +856,10 @@ int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, - struct d3dkmt_opensyncobjectfromnthandle2 - *args, - struct dxgsyncobject *syncobj); -+int dxgvmb_send_get_device_state(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_getdevicestate *args, -+ struct d3dkmt_getdevicestate *__user inargs); - int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process, - struct d3dkmthandle object, - struct d3dkmthandle *shared_handle); -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -827,7 +827,6 @@ static struct dxgglobal *dxgglobal_create(void) - #ifdef DEBUG - dxgk_validate_ioctls(); - #endif -- - return dxgglobal; - } - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -281,6 +281,24 @@ static void command_vm_to_host_init1(struct dxgkvmb_command_vm_to_host *command, - command->channel_type = DXGKVMB_VM_TO_HOST; - } - -+static void set_guest_data(struct dxgkvmb_command_host_to_vm *packet, -+ u32 packet_length) -+{ -+ struct dxgkvmb_command_setguestdata *command = (void *)packet; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ DXG_TRACE("Setting guest data: %d %d %p %p", 
-+ command->data_type, -+ command->data32, -+ command->guest_pointer, -+ &dxgglobal->device_state_counter); -+ if (command->data_type == SETGUESTDATA_DATATYPE_DWORD && -+ command->guest_pointer == &dxgglobal->device_state_counter && -+ command->data32 != 0) { -+ atomic_inc(&dxgglobal->device_state_counter); -+ } -+} -+ - static void signal_guest_event(struct dxgkvmb_command_host_to_vm *packet, - u32 packet_length) - { -@@ -311,6 +329,9 @@ static void process_inband_packet(struct dxgvmbuschannel *channel, - DXG_TRACE("global packet %d", - packet->command_type); - switch (packet->command_type) { -+ case DXGK_VMBCOMMAND_SETGUESTDATA: -+ set_guest_data(packet, packet_length); -+ break; - case DXGK_VMBCOMMAND_SIGNALGUESTEVENT: - case DXGK_VMBCOMMAND_SIGNALGUESTEVENTPASSIVE: - signal_guest_event(packet, packet_length); -@@ -1028,6 +1049,7 @@ struct d3dkmthandle dxgvmb_send_create_device(struct dxgadapter *adapter, - struct dxgkvmb_command_createdevice *command; - struct dxgkvmb_command_createdevice_return result = { }; - struct dxgvmbusmsg msg; -+ struct dxgglobal *dxgglobal = dxggbl(); - - ret = init_message(&msg, adapter, process, sizeof(*command)); - if (ret) -@@ -1037,6 +1059,7 @@ struct d3dkmthandle dxgvmb_send_create_device(struct dxgadapter *adapter, - command_vgpu_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_CREATEDEVICE, - process->host_handle); - command->flags = args->flags; -+ command->error_code = &dxgglobal->device_state_counter; - - ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, - &result, sizeof(result)); -@@ -1806,6 +1829,51 @@ int dxgvmb_send_destroy_allocation(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_get_device_state(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_getdevicestate *args, -+ struct d3dkmt_getdevicestate *__user output) -+{ -+ int ret; -+ struct dxgkvmb_command_getdevicestate *command; -+ struct dxgkvmb_command_getdevicestate_return result = { }; -+ struct dxgvmbusmsg msg = 
{.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_GETDEVICESTATE, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result.status); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(output, &result.args, sizeof(result.args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EINVAL; -+ } -+ -+ if (args->state_type == _D3DKMT_DEVICESTATE_EXECUTION) -+ args->execution_state = result.args.execution_state; -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_open_resource(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmthandle device, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -172,6 +172,22 @@ struct dxgkvmb_command_signalguestevent { - bool dereference_event; - }; - -+enum set_guestdata_type { -+ SETGUESTDATA_DATATYPE_DWORD = 0, -+ SETGUESTDATA_DATATYPE_UINT64 = 1 -+}; -+ -+struct dxgkvmb_command_setguestdata { -+ struct dxgkvmb_command_host_to_vm hdr; -+ void *guest_pointer; -+ union { -+ u64 data64; -+ u32 data32; -+ }; -+ u32 dereference : 1; -+ u32 data_type : 4; -+}; -+ - struct dxgkvmb_command_opensyncobject { - struct dxgkvmb_command_vm_to_host hdr; - struct d3dkmthandle device; -@@ -574,6 +590,16 @@ struct dxgkvmb_command_destroyhwqueue { - struct d3dkmthandle hwqueue; - }; - -+struct dxgkvmb_command_getdevicestate { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_getdevicestate args; -+}; -+ -+struct dxgkvmb_command_getdevicestate_return { -+ struct 
d3dkmt_getdevicestate args; -+ struct ntstatus status; -+}; -+ - struct dxgkvmb_command_shareobjectwithhost { - struct dxgkvmb_command_vm_to_host hdr; - struct d3dkmthandle device_handle; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3142,6 +3142,70 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_getdevicestate args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int global_device_state_counter = 0; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ if (args.state_type == _D3DKMT_DEVICESTATE_EXECUTION) { -+ global_device_state_counter = -+ atomic_read(&dxgglobal->device_state_counter); -+ if (device->execution_state_counter == -+ global_device_state_counter) { -+ args.execution_state = device->execution_state; -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy args to user"); -+ ret = -EINVAL; -+ } -+ goto cleanup; -+ } -+ } -+ -+ ret = dxgvmb_send_get_device_state(process, adapter, &args, inargs); -+ -+ if (ret == 0 && args.state_type == _D3DKMT_DEVICESTATE_EXECUTION) { -+ device->execution_state = args.execution_state; -+ device->execution_state_counter = global_device_state_counter; -+ } -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if 
(device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ if (ret < 0) -+ DXG_ERR("Failed to get device state %x", ret); -+ -+ return ret; -+} -+ - static int - dxgsharedsyncobj_get_host_nt_handle(struct dxgsharedsyncobject *syncobj, - struct dxgprocess *process, -@@ -3822,7 +3886,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x0b */ {}, - /* 0x0c */ {}, - /* 0x0d */ {}, --/* 0x0e */ {}, -+/* 0x0e */ {dxgkio_get_device_state, LX_DXGETDEVICESTATE}, - /* 0x0f */ {dxgkio_submit_command, LX_DXSUBMITCOMMAND}, - /* 0x10 */ {dxgkio_create_sync_object, LX_DXCREATESYNCHRONIZATIONOBJECT}, - /* 0x11 */ {dxgkio_signal_sync_object, LX_DXSIGNALSYNCHRONIZATIONOBJECT}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -236,6 +236,95 @@ struct d3dddi_destroypagingqueue { - struct d3dkmthandle paging_queue; - }; - -+enum dxgk_render_pipeline_stage { -+ _DXGK_RENDER_PIPELINE_STAGE_UNKNOWN = 0, -+ _DXGK_RENDER_PIPELINE_STAGE_INPUT_ASSEMBLER = 1, -+ _DXGK_RENDER_PIPELINE_STAGE_VERTEX_SHADER = 2, -+ _DXGK_RENDER_PIPELINE_STAGE_GEOMETRY_SHADER = 3, -+ _DXGK_RENDER_PIPELINE_STAGE_STREAM_OUTPUT = 4, -+ _DXGK_RENDER_PIPELINE_STAGE_RASTERIZER = 5, -+ _DXGK_RENDER_PIPELINE_STAGE_PIXEL_SHADER = 6, -+ _DXGK_RENDER_PIPELINE_STAGE_OUTPUT_MERGER = 7, -+}; -+ -+enum dxgk_page_fault_flags { -+ _DXGK_PAGE_FAULT_WRITE = 0x1, -+ _DXGK_PAGE_FAULT_FENCE_INVALID = 0x2, -+ _DXGK_PAGE_FAULT_ADAPTER_RESET_REQUIRED = 0x4, -+ _DXGK_PAGE_FAULT_ENGINE_RESET_REQUIRED = 0x8, -+ _DXGK_PAGE_FAULT_FATAL_HARDWARE_ERROR = 0x10, -+ _DXGK_PAGE_FAULT_IOMMU = 0x20, -+ _DXGK_PAGE_FAULT_HW_CONTEXT_VALID = 0x40, -+ _DXGK_PAGE_FAULT_PROCESS_HANDLE_VALID = 0x80, -+}; -+ -+enum dxgk_general_error_code { -+ _DXGK_GENERAL_ERROR_PAGE_FAULT = 0, -+ _DXGK_GENERAL_ERROR_INVALID_INSTRUCTION = 1, -+}; -+ -+struct dxgk_fault_error_code { -+ union { -+ struct { -+ __u32 
is_device_specific_code:1; -+ enum dxgk_general_error_code general_error_code:31; -+ }; -+ struct { -+ __u32 is_device_specific_code_reserved_bit:1; -+ __u32 device_specific_code:31; -+ }; -+ }; -+}; -+ -+struct d3dkmt_devicereset_state { -+ union { -+ struct { -+ __u32 desktop_switched:1; -+ __u32 reserved:31; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_devicepagefault_state { -+ __u64 faulted_primitive_api_sequence_number; -+ enum dxgk_render_pipeline_stage faulted_pipeline_stage; -+ __u32 faulted_bind_table_entry; -+ enum dxgk_page_fault_flags page_fault_flags; -+ struct dxgk_fault_error_code fault_error_code; -+ __u64 faulted_virtual_address; -+}; -+ -+enum d3dkmt_deviceexecution_state { -+ _D3DKMT_DEVICEEXECUTION_ACTIVE = 1, -+ _D3DKMT_DEVICEEXECUTION_RESET = 2, -+ _D3DKMT_DEVICEEXECUTION_HUNG = 3, -+ _D3DKMT_DEVICEEXECUTION_STOPPED = 4, -+ _D3DKMT_DEVICEEXECUTION_ERROR_OUTOFMEMORY = 5, -+ _D3DKMT_DEVICEEXECUTION_ERROR_DMAFAULT = 6, -+ _D3DKMT_DEVICEEXECUTION_ERROR_DMAPAGEFAULT = 7, -+}; -+ -+enum d3dkmt_devicestate_type { -+ _D3DKMT_DEVICESTATE_EXECUTION = 1, -+ _D3DKMT_DEVICESTATE_PRESENT = 2, -+ _D3DKMT_DEVICESTATE_RESET = 3, -+ _D3DKMT_DEVICESTATE_PRESENT_DWM = 4, -+ _D3DKMT_DEVICESTATE_PAGE_FAULT = 5, -+ _D3DKMT_DEVICESTATE_PRESENT_QUEUE = 6, -+}; -+ -+struct d3dkmt_getdevicestate { -+ struct d3dkmthandle device; -+ enum d3dkmt_devicestate_type state_type; -+ union { -+ enum d3dkmt_deviceexecution_state execution_state; -+ struct d3dkmt_devicereset_state reset_state; -+ struct d3dkmt_devicepagefault_state page_fault_state; -+ char alignment[48]; -+ }; -+}; -+ - enum d3dkmdt_gdisurfacetype { - _D3DKMDT_GDISURFACE_INVALID = 0, - _D3DKMDT_GDISURFACE_TEXTURE = 1, -@@ -759,16 +848,6 @@ struct d3dkmt_queryadapterinfo { - __u32 private_data_size; - }; - --enum d3dkmt_deviceexecution_state { -- _D3DKMT_DEVICEEXECUTION_ACTIVE = 1, -- _D3DKMT_DEVICEEXECUTION_RESET = 2, -- _D3DKMT_DEVICEEXECUTION_HUNG = 3, -- _D3DKMT_DEVICEEXECUTION_STOPPED = 4, -- 
_D3DKMT_DEVICEEXECUTION_ERROR_OUTOFMEMORY = 5, -- _D3DKMT_DEVICEEXECUTION_ERROR_DMAFAULT = 6, -- _D3DKMT_DEVICEEXECUTION_ERROR_DMAPAGEFAULT = 7, --}; -- - struct d3dddi_openallocationinfo2 { - struct d3dkmthandle allocation; - #ifdef __KERNEL__ -@@ -978,6 +1057,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x07, struct d3dkmt_createpagingqueue) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) -+#define LX_DXGETDEVICESTATE \ -+ _IOWR(0x47, 0x0e, struct d3dkmt_getdevicestate) - #define LX_DXSUBMITCOMMAND \ - _IOWR(0x47, 0x0f, struct d3dkmt_submitcommand) - #define LX_DXCREATESYNCHRONIZATIONOBJECT \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1684-drivers-hv-dxgkrnl-Map-unmap-CPU-address-to-device-allocation.patch b/patch/kernel/archive/wsl2-arm64-6.6/1684-drivers-hv-dxgkrnl-Map-unmap-CPU-address-to-device-allocation.patch deleted file mode 100644 index 8b3e0ee42808..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1684-drivers-hv-dxgkrnl-Map-unmap-CPU-address-to-device-allocation.patch +++ /dev/null @@ -1,498 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 19 Jan 2022 13:58:28 -0800 -Subject: drivers: hv: dxgkrnl: Map(unmap) CPU address to device allocation - -Implement ioctls to map/unmap CPU virtual addresses to compute device -allocations - LX_DXLOCK2 and LX_DXUNLOCK2. - -The LX_DXLOCK2 ioctl maps a CPU virtual address to a compute device -allocation. The allocation could be located in system memory or local -device memory on the host. When the device allocation is created -from the guest system memory (existing sysmem allocation), the -allocation CPU address is known and is returned to the caller. -For other CPU visible allocations the code flow is the following: -1. A VM bus message is sent to the host to map the allocation -2. The host allocates a portion of the guest IO space and maps it - to the allocation backing store. 
The IO space address of the - allocation is returned back to the guest. -3. The guest allocates a CPU virtual address and maps it to the IO - space (see the dxg_map_iospace function). -4. The CPU VA is returned back to the caller -cpu_address_mapped and cpu_address_refcount are used to track how -many times an allocation was mapped. - -The LX_DXUNLOCK2 ioctl unmaps a CPU virtual address from a compute -device allocation. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 11 + - drivers/hv/dxgkrnl/dxgkrnl.h | 14 + - drivers/hv/dxgkrnl/dxgvmbus.c | 107 +++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 19 ++ - drivers/hv/dxgkrnl/ioctl.c | 160 +++++++++- - include/uapi/misc/d3dkmthk.h | 30 ++ - 6 files changed, 339 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -885,6 +885,15 @@ void dxgallocation_stop(struct dxgallocation *alloc) - vfree(alloc->pages); - alloc->pages = NULL; - } -+ dxgprocess_ht_lock_exclusive_down(alloc->process); -+ if (alloc->cpu_address_mapped) { -+ dxg_unmap_iospace(alloc->cpu_address, -+ alloc->num_pages << PAGE_SHIFT); -+ alloc->cpu_address_mapped = false; -+ alloc->cpu_address = NULL; -+ alloc->cpu_address_refcount = 0; -+ } -+ dxgprocess_ht_lock_exclusive_up(alloc->process); - } - - void dxgallocation_free_handle(struct dxgallocation *alloc) -@@ -932,6 +941,8 @@ else - #endif - if (alloc->priv_drv_data) - vfree(alloc->priv_drv_data); -+ if (alloc->cpu_address_mapped) -+ pr_err("Alloc IO space is mapped: %p", alloc); - kfree(alloc); - } - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -708,6 +708,8 @@ struct dxgallocation { - struct d3dkmthandle 
alloc_handle; - /* Set to 1 when allocation belongs to resource. */ - u32 resource_owner:1; -+ /* Set to 1 when 'cpu_address' is mapped to the IO space. */ -+ u32 cpu_address_mapped:1; - /* Set to 1 when the allocatio is mapped as cached */ - u32 cached:1; - u32 handle_valid:1; -@@ -719,6 +721,11 @@ struct dxgallocation { - #endif - /* Number of pages in the 'pages' array */ - u32 num_pages; -+ /* -+ * How many times dxgk_lock2 is called to allocation, which is mapped -+ * to IO space. -+ */ -+ u32 cpu_address_refcount; - /* - * CPU address from the existing sysmem allocation, or - * mapped to the CPU visible backing store in the IO space -@@ -837,6 +844,13 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - d3dkmt_waitforsynchronizationobjectfromcpu - *args, - u64 cpu_event); -+int dxgvmb_send_lock2(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_lock2 *args, -+ struct d3dkmt_lock2 *__user outargs); -+int dxgvmb_send_unlock2(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_unlock2 *args); - int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_createhwqueue *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -2354,6 +2354,113 @@ int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_lock2(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_lock2 *args, -+ struct d3dkmt_lock2 *__user outargs) -+{ -+ int ret; -+ struct dxgkvmb_command_lock2 *command; -+ struct dxgkvmb_command_lock2_return result = { }; -+ struct dxgallocation *alloc = NULL; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ 
command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_LOCK2, process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result.status); -+ if (ret < 0) -+ goto cleanup; -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ alloc = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ args->allocation); -+ if (alloc == NULL) { -+ DXG_ERR("invalid alloc"); -+ ret = -EINVAL; -+ } else { -+ if (alloc->cpu_address) { -+ args->data = alloc->cpu_address; -+ if (alloc->cpu_address_mapped) -+ alloc->cpu_address_refcount++; -+ } else { -+ u64 offset = (u64)result.cpu_visible_buffer_offset; -+ -+ args->data = dxg_map_iospace(offset, -+ alloc->num_pages << PAGE_SHIFT, -+ PROT_READ | PROT_WRITE, alloc->cached); -+ if (args->data) { -+ alloc->cpu_address_refcount = 1; -+ alloc->cpu_address_mapped = true; -+ alloc->cpu_address = args->data; -+ } -+ } -+ if (args->data == NULL) { -+ ret = -ENOMEM; -+ } else { -+ ret = copy_to_user(&outargs->data, &args->data, -+ sizeof(args->data)); -+ if (ret) { -+ DXG_ERR("failed to copy data"); -+ ret = -EINVAL; -+ alloc->cpu_address_refcount--; -+ if (alloc->cpu_address_refcount == 0) { -+ dxg_unmap_iospace(alloc->cpu_address, -+ alloc->num_pages << PAGE_SHIFT); -+ alloc->cpu_address_mapped = false; -+ alloc->cpu_address = NULL; -+ } -+ } -+ } -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_unlock2(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_unlock2 *args) -+{ -+ int ret; -+ struct dxgkvmb_command_unlock2 *command; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; 
-+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_UNLOCK2, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_createhwqueue *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -570,6 +570,25 @@ struct dxgkvmb_command_waitforsyncobjectfromgpu { - /* struct d3dkmthandle ObjectHandles[object_count] */ - }; - -+struct dxgkvmb_command_lock2 { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_lock2 args; -+ bool use_legacy_lock; -+ u32 flags; -+ u32 priv_drv_data; -+}; -+ -+struct dxgkvmb_command_lock2_return { -+ struct ntstatus status; -+ void *cpu_visible_buffer_offset; -+}; -+ -+struct dxgkvmb_command_unlock2 { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_unlock2 args; -+ bool use_legacy_unlock; -+}; -+ - /* Returns the same structure */ - struct dxgkvmb_command_createhwqueue { - struct dxgkvmb_command_vgpu_to_host hdr; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3142,6 +3142,162 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_lock2(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_lock2 args; -+ struct d3dkmt_lock2 *__user result = inargs; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgallocation *alloc = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy 
input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ args.data = NULL; -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ alloc = hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ args.allocation); -+ if (alloc == NULL) { -+ ret = -EINVAL; -+ } else { -+ if (alloc->cpu_address) { -+ ret = copy_to_user(&result->data, -+ &alloc->cpu_address, -+ sizeof(args.data)); -+ if (ret == 0) { -+ args.data = alloc->cpu_address; -+ if (alloc->cpu_address_mapped) -+ alloc->cpu_address_refcount++; -+ } else { -+ DXG_ERR("Failed to copy cpu address"); -+ ret = -EINVAL; -+ } -+ } -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ if (ret < 0) -+ goto cleanup; -+ if (args.data) -+ goto success; -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_lock2(process, adapter, &args, result); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+success: -+ DXG_TRACE("ioctl:%s %s %d", errorstr(ret), __func__, ret); -+ return ret; -+} -+ -+static int -+dxgkio_unlock2(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_unlock2 args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgallocation *alloc = NULL; -+ bool done = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ alloc = 
hmgrtable_get_object_by_type(&process->handle_table, -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ args.allocation); -+ if (alloc == NULL) { -+ ret = -EINVAL; -+ } else { -+ if (alloc->cpu_address == NULL) { -+ DXG_ERR("Allocation is not locked: %p", alloc); -+ ret = -EINVAL; -+ } else if (alloc->cpu_address_mapped) { -+ if (alloc->cpu_address_refcount > 0) { -+ alloc->cpu_address_refcount--; -+ if (alloc->cpu_address_refcount != 0) { -+ done = true; -+ } else { -+ dxg_unmap_iospace(alloc->cpu_address, -+ alloc->num_pages << PAGE_SHIFT); -+ alloc->cpu_address_mapped = false; -+ alloc->cpu_address = NULL; -+ } -+ } else { -+ DXG_ERR("Invalid cpu access refcount"); -+ done = true; -+ } -+ } -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ if (done) -+ goto success; -+ if (ret < 0) -+ goto cleanup; -+ -+ /* -+ * The call acquires reference on the device. It is safe to access the -+ * adapter, because the device holds reference on it. -+ */ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_unlock2(process, adapter, &args); -+ -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+success: -+ DXG_TRACE("ioctl:%s %s %d", errorstr(ret), __func__, ret); -+ return ret; -+} -+ - static int - dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) - { -@@ -3909,7 +4065,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x22 */ {}, - /* 0x23 */ {}, - /* 0x24 */ {}, --/* 0x25 */ {}, -+/* 0x25 */ {dxgkio_lock2, LX_DXLOCK2}, - /* 0x26 */ {}, - /* 0x27 */ {}, - /* 0x28 */ {}, -@@ -3932,7 +4088,7 @@ static struct ioctl_desc ioctls[] = { - LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE}, - /* 0x36 */ {dxgkio_submit_wait_to_hwqueue, - 
LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE}, --/* 0x37 */ {}, -+/* 0x37 */ {dxgkio_unlock2, LX_DXUNLOCK2}, - /* 0x38 */ {}, - /* 0x39 */ {}, - /* 0x3a */ {dxgkio_wait_sync_object_cpu, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -668,6 +668,32 @@ struct d3dkmt_submitcommandtohwqueue { - #endif - }; - -+struct d3dddicb_lock2flags { -+ union { -+ struct { -+ __u32 reserved:32; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_lock2 { -+ struct d3dkmthandle device; -+ struct d3dkmthandle allocation; -+ struct d3dddicb_lock2flags flags; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ void *data; -+#else -+ __u64 data; -+#endif -+}; -+ -+struct d3dkmt_unlock2 { -+ struct d3dkmthandle device; -+ struct d3dkmthandle allocation; -+}; -+ - enum d3dkmt_standardallocationtype { - _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP = 1, - _D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER = 2, -@@ -1083,6 +1109,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ - _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) -+#define LX_DXLOCK2 \ -+ _IOWR(0x47, 0x25, struct d3dkmt_lock2) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x31, struct d3dkmt_signalsynchronizationobjectfromcpu) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU \ -@@ -1095,6 +1123,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x35, struct d3dkmt_submitsignalsyncobjectstohwqueue) - #define LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE \ - _IOWR(0x47, 0x36, struct d3dkmt_submitwaitforsyncobjectstohwqueue) -+#define LX_DXUNLOCK2 \ -+ _IOWR(0x47, 0x37, struct d3dkmt_unlock2) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x3a, struct d3dkmt_waitforsynchronizationobjectfromcpu) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \ --- -Armbian - diff --git 
a/patch/kernel/archive/wsl2-arm64-6.6/1685-drivers-hv-dxgkrnl-Manage-device-allocation-properties.patch b/patch/kernel/archive/wsl2-arm64-6.6/1685-drivers-hv-dxgkrnl-Manage-device-allocation-properties.patch deleted file mode 100644 index bd024641118d..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1685-drivers-hv-dxgkrnl-Manage-device-allocation-properties.patch +++ /dev/null @@ -1,912 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 19 Jan 2022 11:14:22 -0800 -Subject: drivers: hv: dxgkrnl: Manage device allocation properties - -Implement ioctls to manage properties of a compute device allocation: - - LX_DXUPDATEALLOCPROPERTY, - - LX_DXSETALLOCATIONPRIORITY, - - LX_DXGETALLOCATIONPRIORITY, - - LX_DXQUERYALLOCATIONRESIDENCY. - - LX_DXCHANGEVIDEOMEMORYRESERVATION, - -The LX_DXUPDATEALLOCPROPERTY ioctl requests the host to update -various properties of a compute devoce allocation. - -The LX_DXSETALLOCATIONPRIORITY and LX_DXGETALLOCATIONPRIORITY ioctls -are used to set/get allocation priority, which defines the -importance of the allocation to be in the local device memory. - -The LX_DXQUERYALLOCATIONRESIDENCY ioctl queries if the allocation -is located in the compute device accessible memory. - -The LX_DXCHANGEVIDEOMEMORYRESERVATION ioctl changes compute device -memory reservation of an allocation. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 21 + - drivers/hv/dxgkrnl/dxgvmbus.c | 300 ++++++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 50 ++ - drivers/hv/dxgkrnl/ioctl.c | 217 ++++++- - include/uapi/misc/d3dkmthk.h | 127 ++++ - 5 files changed, 708 insertions(+), 7 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -851,6 +851,23 @@ int dxgvmb_send_lock2(struct dxgprocess *process, - int dxgvmb_send_unlock2(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_unlock2 *args); -+int dxgvmb_send_update_alloc_property(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dddi_updateallocproperty *args, -+ struct d3dddi_updateallocproperty *__user -+ inargs); -+int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_setallocationpriority *a); -+int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_getallocationpriority *a); -+int dxgvmb_send_change_vidmem_reservation(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle other_process, -+ struct -+ d3dkmt_changevideomemoryreservation -+ *args); - int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_createhwqueue *args, -@@ -870,6 +887,10 @@ int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, - struct d3dkmt_opensyncobjectfromnthandle2 - *args, - struct dxgsyncobject *syncobj); -+int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryallocationresidency -+ *args); - int dxgvmb_send_get_device_state(struct dxgprocess *process, - struct dxgadapter *adapter, - struct 
d3dkmt_getdevicestate *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1829,6 +1829,79 @@ int dxgvmb_send_destroy_allocation(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryallocationresidency -+ *args) -+{ -+ int ret = -EINVAL; -+ struct dxgkvmb_command_queryallocationresidency *command = NULL; -+ u32 cmd_size = sizeof(*command); -+ u32 alloc_size = 0; -+ u32 result_allocation_size = 0; -+ struct dxgkvmb_command_queryallocationresidency_return *result = NULL; -+ u32 result_size = sizeof(*result); -+ struct dxgvmbusmsgres msg = {.hdr = NULL}; -+ -+ if (args->allocation_count > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args->allocation_count) { -+ alloc_size = args->allocation_count * -+ sizeof(struct d3dkmthandle); -+ cmd_size += alloc_size; -+ result_allocation_size = args->allocation_count * -+ sizeof(args->residency_status[0]); -+ } else { -+ result_allocation_size = sizeof(args->residency_status[0]); -+ } -+ result_size += result_allocation_size; -+ -+ ret = init_message_res(&msg, adapter, process, cmd_size, result_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ result = msg.res; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_QUERYALLOCATIONRESIDENCY, -+ process->host_handle); -+ command->args = *args; -+ if (alloc_size) { -+ ret = copy_from_user(&command[1], args->allocations, -+ alloc_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ result, msg.res_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result->status); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = 
copy_to_user(args->residency_status, &result[1], -+ result_allocation_size); -+ if (ret) { -+ DXG_ERR("failed to copy residency status"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ free_message((struct dxgvmbusmsg *)&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_get_device_state(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_getdevicestate *args, -@@ -2461,6 +2534,233 @@ int dxgvmb_send_unlock2(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_update_alloc_property(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dddi_updateallocproperty *args, -+ struct d3dddi_updateallocproperty *__user -+ inargs) -+{ -+ int ret; -+ int ret1; -+ struct dxgkvmb_command_updateallocationproperty *command; -+ struct dxgkvmb_command_updateallocationproperty_return result = { }; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_UPDATEALLOCATIONPROPERTY, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ -+ if (ret < 0) -+ goto cleanup; -+ ret = ntstatus2int(result.status); -+ /* STATUS_PENING is a success code > 0 */ -+ if (ret == STATUS_PENDING) { -+ ret1 = copy_to_user(&inargs->paging_fence_value, -+ &result.paging_fence_value, -+ sizeof(u64)); -+ if (ret1) { -+ DXG_ERR("failed to copy paging fence"); -+ ret = -EINVAL; -+ } -+ } -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_setallocationpriority *args) -+{ -+ u32 cmd_size = sizeof(struct dxgkvmb_command_setallocationpriority); -+ u32 alloc_size = 0; -+ u32 
priority_size = 0; -+ struct dxgkvmb_command_setallocationpriority *command; -+ int ret; -+ struct d3dkmthandle *allocations; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ if (args->allocation_count > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (args->resource.v) { -+ priority_size = sizeof(u32); -+ if (args->allocation_count != 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } else { -+ if (args->allocation_count == 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ alloc_size = args->allocation_count * -+ sizeof(struct d3dkmthandle); -+ cmd_size += alloc_size; -+ priority_size = sizeof(u32) * args->allocation_count; -+ } -+ cmd_size += priority_size; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_SETALLOCATIONPRIORITY, -+ process->host_handle); -+ command->device = args->device; -+ command->allocation_count = args->allocation_count; -+ command->resource = args->resource; -+ allocations = (struct d3dkmthandle *) &command[1]; -+ ret = copy_from_user(allocations, args->allocation_list, -+ alloc_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handle"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_from_user((u8 *) allocations + alloc_size, -+ args->priorities, priority_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc priority"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_getallocationpriority *args) -+{ -+ u32 cmd_size = sizeof(struct dxgkvmb_command_getallocationpriority); -+ u32 result_size; -+ u32 alloc_size = 0; -+ u32 priority_size = 0; -+ struct 
dxgkvmb_command_getallocationpriority *command; -+ struct dxgkvmb_command_getallocationpriority_return *result; -+ int ret; -+ struct d3dkmthandle *allocations; -+ struct dxgvmbusmsgres msg = {.hdr = NULL}; -+ -+ if (args->allocation_count > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (args->resource.v) { -+ priority_size = sizeof(u32); -+ if (args->allocation_count != 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } else { -+ if (args->allocation_count == 0) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ alloc_size = args->allocation_count * -+ sizeof(struct d3dkmthandle); -+ cmd_size += alloc_size; -+ priority_size = sizeof(u32) * args->allocation_count; -+ } -+ result_size = sizeof(*result) + priority_size; -+ -+ ret = init_message_res(&msg, adapter, process, cmd_size, result_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ result = msg.res; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_GETALLOCATIONPRIORITY, -+ process->host_handle); -+ command->device = args->device; -+ command->allocation_count = args->allocation_count; -+ command->resource = args->resource; -+ allocations = (struct d3dkmthandle *) &command[1]; -+ ret = copy_from_user(allocations, args->allocation_list, -+ alloc_size); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, -+ msg.size + msg.res_size, -+ result, msg.res_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result->status); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(args->priorities, -+ (u8 *) result + sizeof(*result), -+ priority_size); -+ if (ret) { -+ DXG_ERR("failed to copy priorities"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ free_message((struct dxgvmbusmsg *)&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_change_vidmem_reservation(struct dxgprocess *process, -+ struct dxgadapter 
*adapter, -+ struct d3dkmthandle other_process, -+ struct -+ d3dkmt_changevideomemoryreservation -+ *args) -+{ -+ struct dxgkvmb_command_changevideomemoryreservation *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_CHANGEVIDEOMEMORYRESERVATION, -+ process->host_handle); -+ command->args = *args; -+ command->args.process = other_process.v; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_createhwqueue *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -308,6 +308,29 @@ struct dxgkvmb_command_queryadapterinfo_return { - u8 private_data[1]; - }; - -+/* Returns ntstatus */ -+struct dxgkvmb_command_setallocationpriority { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+ u32 allocation_count; -+ /* struct d3dkmthandle allocations[allocation_count or 0]; */ -+ /* u32 priorities[allocation_count or 1]; */ -+}; -+ -+struct dxgkvmb_command_getallocationpriority { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+ u32 allocation_count; -+ /* struct d3dkmthandle allocations[allocation_count or 0]; */ -+}; -+ -+struct dxgkvmb_command_getallocationpriority_return { -+ struct ntstatus status; -+ /* u32 priorities[allocation_count or 1]; */ -+}; -+ - struct dxgkvmb_command_createdevice { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_createdeviceflags flags; 
-@@ -589,6 +612,22 @@ struct dxgkvmb_command_unlock2 { - bool use_legacy_unlock; - }; - -+struct dxgkvmb_command_updateallocationproperty { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dddi_updateallocproperty args; -+}; -+ -+struct dxgkvmb_command_updateallocationproperty_return { -+ u64 paging_fence_value; -+ struct ntstatus status; -+}; -+ -+/* Returns ntstatus */ -+struct dxgkvmb_command_changevideomemoryreservation { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_changevideomemoryreservation args; -+}; -+ - /* Returns the same structure */ - struct dxgkvmb_command_createhwqueue { - struct dxgkvmb_command_vgpu_to_host hdr; -@@ -609,6 +648,17 @@ struct dxgkvmb_command_destroyhwqueue { - struct d3dkmthandle hwqueue; - }; - -+struct dxgkvmb_command_queryallocationresidency { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_queryallocationresidency args; -+ /* struct d3dkmthandle allocations[0 or number of allocations] */ -+}; -+ -+struct dxgkvmb_command_queryallocationresidency_return { -+ struct ntstatus status; -+ /* d3dkmt_allocationresidencystatus[NumAllocations] */ -+}; -+ - struct dxgkvmb_command_getdevicestate { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_getdevicestate args; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3214,7 +3214,7 @@ dxgkio_lock2(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - - success: -- DXG_TRACE("ioctl:%s %s %d", errorstr(ret), __func__, ret); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); - return ret; - } - -@@ -3294,7 +3294,209 @@ dxgkio_unlock2(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - - success: -- DXG_TRACE("ioctl:%s %s %d", errorstr(ret), __func__, ret); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; 
-+} -+ -+static int -+dxgkio_update_alloc_property(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dddi_updateallocproperty args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.paging_queue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_update_alloc_property(process, adapter, -+ &args, inargs); -+ -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_query_alloc_residency(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_queryallocationresidency args; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if ((args.allocation_count == 0) == (args.resource.v == 0)) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ ret = dxgvmb_send_query_alloc_residency(process, adapter, &args); -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ 
DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_set_allocation_priority(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_setallocationpriority args; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ ret = dxgvmb_send_set_allocation_priority(process, adapter, &args); -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_get_allocation_priority(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_getallocationpriority args; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ ret = dxgvmb_send_get_allocation_priority(process, adapter, &args); -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_change_vidmem_reservation(struct dxgprocess 
*process, void *__user inargs) -+{ -+ struct d3dkmt_changevideomemoryreservation args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ bool adapter_locked = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.process != 0) { -+ DXG_ERR("setting memory reservation for other process"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ adapter_locked = true; -+ args.adapter.v = 0; -+ ret = dxgvmb_send_change_vidmem_reservation(process, adapter, -+ zerohandle, &args); -+ -+cleanup: -+ -+ if (adapter_locked) -+ dxgadapter_release_lock_shared(adapter); -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); - return ret; - } - -@@ -4050,7 +4252,8 @@ static struct ioctl_desc ioctls[] = { - /* 0x13 */ {dxgkio_destroy_allocation, LX_DXDESTROYALLOCATION2}, - /* 0x14 */ {dxgkio_enum_adapters, LX_DXENUMADAPTERS2}, - /* 0x15 */ {dxgkio_close_adapter, LX_DXCLOSEADAPTER}, --/* 0x16 */ {}, -+/* 0x16 */ {dxgkio_change_vidmem_reservation, -+ LX_DXCHANGEVIDEOMEMORYRESERVATION}, - /* 0x17 */ {}, - /* 0x18 */ {dxgkio_create_hwqueue, LX_DXCREATEHWQUEUE}, - /* 0x19 */ {dxgkio_destroy_device, LX_DXDESTROYDEVICE}, -@@ -4070,11 +4273,11 @@ static struct ioctl_desc ioctls[] = { - /* 0x27 */ {}, - /* 0x28 */ {}, - /* 0x29 */ {}, --/* 0x2a */ {}, -+/* 0x2a */ {dxgkio_query_alloc_residency, LX_DXQUERYALLOCATIONRESIDENCY}, - /* 0x2b */ {}, - /* 0x2c */ {}, - /* 0x2d */ {}, --/* 0x2e */ {}, -+/* 0x2e */ {dxgkio_set_allocation_priority, LX_DXSETALLOCATIONPRIORITY}, - /* 0x2f */ {}, - /* 0x30 */ {}, - /* 0x31 */ {dxgkio_signal_sync_object_cpu, -@@ -4089,13 +4292,13 @@ 
static struct ioctl_desc ioctls[] = { - /* 0x36 */ {dxgkio_submit_wait_to_hwqueue, - LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE}, - /* 0x37 */ {dxgkio_unlock2, LX_DXUNLOCK2}, --/* 0x38 */ {}, -+/* 0x38 */ {dxgkio_update_alloc_property, LX_DXUPDATEALLOCPROPERTY}, - /* 0x39 */ {}, - /* 0x3a */ {dxgkio_wait_sync_object_cpu, - LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU}, - /* 0x3b */ {dxgkio_wait_sync_object_gpu, - LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU}, --/* 0x3c */ {}, -+/* 0x3c */ {dxgkio_get_allocation_priority, LX_DXGETALLOCATIONPRIORITY}, - /* 0x3d */ {}, - /* 0x3e */ {dxgkio_enum_adapters3, LX_DXENUMADAPTERS3}, - /* 0x3f */ {dxgkio_share_objects, LX_DXSHAREOBJECTS}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -668,6 +668,63 @@ struct d3dkmt_submitcommandtohwqueue { - #endif - }; - -+struct d3dkmt_setallocationpriority { -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+#ifdef __KERNEL__ -+ const struct d3dkmthandle *allocation_list; -+#else -+ __u64 allocation_list; -+#endif -+ __u32 allocation_count; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ const __u32 *priorities; -+#else -+ __u64 priorities; -+#endif -+}; -+ -+struct d3dkmt_getallocationpriority { -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+#ifdef __KERNEL__ -+ const struct d3dkmthandle *allocation_list; -+#else -+ __u64 allocation_list; -+#endif -+ __u32 allocation_count; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ __u32 *priorities; -+#else -+ __u64 priorities; -+#endif -+}; -+ -+enum d3dkmt_allocationresidencystatus { -+ _D3DKMT_ALLOCATIONRESIDENCYSTATUS_RESIDENTINGPUMEMORY = 1, -+ _D3DKMT_ALLOCATIONRESIDENCYSTATUS_RESIDENTINSHAREDMEMORY = 2, -+ _D3DKMT_ALLOCATIONRESIDENCYSTATUS_NOTRESIDENT = 3, -+}; -+ -+struct d3dkmt_queryallocationresidency { -+ struct d3dkmthandle device; -+ struct d3dkmthandle resource; -+#ifdef __KERNEL__ 
-+ struct d3dkmthandle *allocations; -+#else -+ __u64 allocations; -+#endif -+ __u32 allocation_count; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ enum d3dkmt_allocationresidencystatus *residency_status; -+#else -+ __u64 residency_status; -+#endif -+}; -+ - struct d3dddicb_lock2flags { - union { - struct { -@@ -835,6 +892,11 @@ struct d3dkmt_destroyallocation2 { - struct d3dddicb_destroyallocation2flags flags; - }; - -+enum d3dkmt_memory_segment_group { -+ _D3DKMT_MEMORY_SEGMENT_GROUP_LOCAL = 0, -+ _D3DKMT_MEMORY_SEGMENT_GROUP_NON_LOCAL = 1 -+}; -+ - struct d3dkmt_adaptertype { - union { - struct { -@@ -886,6 +948,61 @@ struct d3dddi_openallocationinfo2 { - __u64 reserved[6]; - }; - -+struct d3dddi_updateallocproperty_flags { -+ union { -+ struct { -+ __u32 accessed_physically:1; -+ __u32 reserved:31; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dddi_segmentpreference { -+ union { -+ struct { -+ __u32 segment_id0:5; -+ __u32 direction0:1; -+ __u32 segment_id1:5; -+ __u32 direction1:1; -+ __u32 segment_id2:5; -+ __u32 direction2:1; -+ __u32 segment_id3:5; -+ __u32 direction3:1; -+ __u32 segment_id4:5; -+ __u32 direction4:1; -+ __u32 reserved:2; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dddi_updateallocproperty { -+ struct d3dkmthandle paging_queue; -+ struct d3dkmthandle allocation; -+ __u32 supported_segment_set; -+ struct d3dddi_segmentpreference preferred_segment; -+ struct d3dddi_updateallocproperty_flags flags; -+ __u64 paging_fence_value; -+ union { -+ struct { -+ __u32 set_accessed_physically:1; -+ __u32 set_supported_segmentSet:1; -+ __u32 set_preferred_segment:1; -+ __u32 reserved:29; -+ }; -+ __u32 property_mask_value; -+ }; -+}; -+ -+struct d3dkmt_changevideomemoryreservation { -+ __u64 process; -+ struct d3dkmthandle adapter; -+ enum d3dkmt_memory_segment_group memory_segment_group; -+ __u64 reservation; -+ __u32 physical_adapter_index; -+}; -+ - struct d3dkmt_createhwqueue { - struct d3dkmthandle context; - struct d3dddi_createhwqueueflags flags; 
-@@ -1099,6 +1216,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x14, struct d3dkmt_enumadapters2) - #define LX_DXCLOSEADAPTER \ - _IOWR(0x47, 0x15, struct d3dkmt_closeadapter) -+#define LX_DXCHANGEVIDEOMEMORYRESERVATION \ -+ _IOWR(0x47, 0x16, struct d3dkmt_changevideomemoryreservation) - #define LX_DXCREATEHWQUEUE \ - _IOWR(0x47, 0x18, struct d3dkmt_createhwqueue) - #define LX_DXDESTROYHWQUEUE \ -@@ -1111,6 +1230,10 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) - #define LX_DXLOCK2 \ - _IOWR(0x47, 0x25, struct d3dkmt_lock2) -+#define LX_DXQUERYALLOCATIONRESIDENCY \ -+ _IOWR(0x47, 0x2a, struct d3dkmt_queryallocationresidency) -+#define LX_DXSETALLOCATIONPRIORITY \ -+ _IOWR(0x47, 0x2e, struct d3dkmt_setallocationpriority) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x31, struct d3dkmt_signalsynchronizationobjectfromcpu) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU \ -@@ -1125,10 +1248,14 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x36, struct d3dkmt_submitwaitforsyncobjectstohwqueue) - #define LX_DXUNLOCK2 \ - _IOWR(0x47, 0x37, struct d3dkmt_unlock2) -+#define LX_DXUPDATEALLOCPROPERTY \ -+ _IOWR(0x47, 0x38, struct d3dddi_updateallocproperty) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x3a, struct d3dkmt_waitforsynchronizationobjectfromcpu) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \ - _IOWR(0x47, 0x3b, struct d3dkmt_waitforsynchronizationobjectfromgpu) -+#define LX_DXGETALLOCATIONPRIORITY \ -+ _IOWR(0x47, 0x3c, struct d3dkmt_getallocationpriority) - #define LX_DXENUMADAPTERS3 \ - _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) - #define LX_DXSHAREOBJECTS \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1686-drivers-hv-dxgkrnl-Flush-heap-transitions.patch b/patch/kernel/archive/wsl2-arm64-6.6/1686-drivers-hv-dxgkrnl-Flush-heap-transitions.patch deleted file mode 100644 index 1dbb3e8773f3..000000000000 --- 
a/patch/kernel/archive/wsl2-arm64-6.6/1686-drivers-hv-dxgkrnl-Flush-heap-transitions.patch +++ /dev/null @@ -1,194 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 18 Jan 2022 17:25:37 -0800 -Subject: drivers: hv: dxgkrnl: Flush heap transitions - -Implement the ioctl to flush heap transitions -(LX_DXFLUSHHEAPTRANSITIONS). - -The ioctl is used to ensure that the video memory manager on the host -flushes all internal operations. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 2 +- - drivers/hv/dxgkrnl/dxgkrnl.h | 3 + - drivers/hv/dxgkrnl/dxgvmbus.c | 23 +++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 5 + - drivers/hv/dxgkrnl/ioctl.c | 49 +++++++++- - include/uapi/misc/d3dkmthk.h | 6 ++ - 6 files changed, 86 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -942,7 +942,7 @@ else - if (alloc->priv_drv_data) - vfree(alloc->priv_drv_data); - if (alloc->cpu_address_mapped) -- pr_err("Alloc IO space is mapped: %p", alloc); -+ DXG_ERR("Alloc IO space is mapped: %p", alloc); - kfree(alloc); - } - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -882,6 +882,9 @@ int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_submitcommandtohwqueue *a); -+int dxgvmb_send_flush_heap_transitions(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_flushheaptransitions *arg); - int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, - struct dxgvmbuschannel *channel, - struct 
d3dkmt_opensyncobjectfromnthandle2 -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1829,6 +1829,29 @@ int dxgvmb_send_destroy_allocation(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_flush_heap_transitions(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_flushheaptransitions *args) -+{ -+ struct dxgkvmb_command_flushheaptransitions *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_FLUSHHEAPTRANSITIONS, -+ process->host_handle); -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryallocationresidency -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -367,6 +367,11 @@ struct dxgkvmb_command_submitcommandtohwqueue { - /* PrivateDriverData */ - }; - -+/* Returns ntstatus */ -+struct dxgkvmb_command_flushheaptransitions { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+}; -+ - struct dxgkvmb_command_createallocation_allocinfo { - u32 flags; - u32 priv_drv_data_size; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3500,6 +3500,53 @@ dxgkio_change_vidmem_reservation(struct dxgprocess *process, void *__user inargs - return ret; - } - -+static int 
-+dxgkio_flush_heap_transitions(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_flushheaptransitions args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ bool adapter_locked = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ adapter_locked = true; -+ -+ args.adapter = adapter->host_handle; -+ ret = dxgvmb_send_flush_heap_transitions(process, adapter, &args); -+ if (ret < 0) -+ goto cleanup; -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (adapter_locked) -+ dxgadapter_release_lock_shared(adapter); -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ return ret; -+} -+ - static int - dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) - { -@@ -4262,7 +4309,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x1c */ {dxgkio_destroy_paging_queue, LX_DXDESTROYPAGINGQUEUE}, - /* 0x1d */ {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT}, - /* 0x1e */ {}, --/* 0x1f */ {}, -+/* 0x1f */ {dxgkio_flush_heap_transitions, LX_DXFLUSHHEAPTRANSITIONS}, - /* 0x20 */ {}, - /* 0x21 */ {}, - /* 0x22 */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -936,6 +936,10 @@ struct d3dkmt_queryadapterinfo { - __u32 private_data_size; - }; - -+struct d3dkmt_flushheaptransitions { -+ struct d3dkmthandle adapter; -+}; -+ - struct d3dddi_openallocationinfo2 { - struct d3dkmthandle allocation; - #ifdef __KERNEL__ 
-@@ -1228,6 +1232,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ - _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) -+#define LX_DXFLUSHHEAPTRANSITIONS \ -+ _IOWR(0x47, 0x1f, struct d3dkmt_flushheaptransitions) - #define LX_DXLOCK2 \ - _IOWR(0x47, 0x25, struct d3dkmt_lock2) - #define LX_DXQUERYALLOCATIONRESIDENCY \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1687-drivers-hv-dxgkrnl-Query-video-memory-information.patch b/patch/kernel/archive/wsl2-arm64-6.6/1687-drivers-hv-dxgkrnl-Query-video-memory-information.patch deleted file mode 100644 index beff76cde6d8..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1687-drivers-hv-dxgkrnl-Query-video-memory-information.patch +++ /dev/null @@ -1,237 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 8 Feb 2022 18:34:07 -0800 -Subject: drivers: hv: dxgkrnl: Query video memory information - -Implement the ioctl to query video memory information from the host -(LX_DXQUERYVIDEOMEMORYINFO). 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 5 + - drivers/hv/dxgkrnl/dxgvmbus.c | 64 ++++++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 14 ++ - drivers/hv/dxgkrnl/ioctl.c | 50 +++++++- - include/uapi/misc/d3dkmthk.h | 13 ++ - 5 files changed, 145 insertions(+), 1 deletion(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -894,6 +894,11 @@ int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryallocationresidency - *args); -+int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryvideomemoryinfo *args, -+ struct d3dkmt_queryvideomemoryinfo -+ *__user iargs); - int dxgvmb_send_get_device_state(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_getdevicestate *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1925,6 +1925,70 @@ int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryvideomemoryinfo *args, -+ struct d3dkmt_queryvideomemoryinfo *__user -+ output) -+{ -+ int ret; -+ struct dxgkvmb_command_queryvideomemoryinfo *command; -+ struct dxgkvmb_command_queryvideomemoryinfo_return result = { }; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ command_vgpu_to_host_init2(&command->hdr, -+ dxgk_vmbcommand_queryvideomemoryinfo, -+ process->host_handle); -+ command->adapter = args->adapter; 
-+ command->memory_segment_group = args->memory_segment_group; -+ command->physical_adapter_index = args->physical_adapter_index; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(&output->budget, &result.budget, -+ sizeof(output->budget)); -+ if (ret) { -+ pr_err("%s failed to copy budget", __func__); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_to_user(&output->current_usage, &result.current_usage, -+ sizeof(output->current_usage)); -+ if (ret) { -+ pr_err("%s failed to copy current usage", __func__); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_to_user(&output->current_reservation, -+ &result.current_reservation, -+ sizeof(output->current_reservation)); -+ if (ret) { -+ pr_err("%s failed to copy reservation", __func__); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = copy_to_user(&output->available_for_reservation, -+ &result.available_for_reservation, -+ sizeof(output->available_for_reservation)); -+ if (ret) { -+ pr_err("%s failed to copy avail reservation", __func__); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ dev_dbg(DXGDEV, "err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_get_device_state(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_getdevicestate *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -664,6 +664,20 @@ struct dxgkvmb_command_queryallocationresidency_return { - /* d3dkmt_allocationresidencystatus[NumAllocations] */ - }; - -+struct dxgkvmb_command_queryvideomemoryinfo { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle adapter; -+ enum d3dkmt_memory_segment_group memory_segment_group; -+ u32 physical_adapter_index; -+}; -+ -+struct dxgkvmb_command_queryvideomemoryinfo_return { -+ u64 
budget; -+ u64 current_usage; -+ u64 current_reservation; -+ u64 available_for_reservation; -+}; -+ - struct dxgkvmb_command_getdevicestate { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_getdevicestate args; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3547,6 +3547,54 @@ dxgkio_flush_heap_transitions(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_query_vidmem_info(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_queryvideomemoryinfo args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ bool adapter_locked = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.process != 0) { -+ DXG_ERR("query vidmem info from another process"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ adapter_locked = true; -+ -+ args.adapter = adapter->host_handle; -+ ret = dxgvmb_send_query_vidmem_info(process, adapter, &args, inargs); -+ -+cleanup: -+ -+ if (adapter_locked) -+ dxgadapter_release_lock_shared(adapter); -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ if (ret < 0) -+ DXG_ERR("failed: %x", ret); -+ return ret; -+} -+ - static int - dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) - { -@@ -4287,7 +4335,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x07 */ {dxgkio_create_paging_queue, LX_DXCREATEPAGINGQUEUE}, - /* 0x08 */ {}, - /* 0x09 */ {dxgkio_query_adapter_info, LX_DXQUERYADAPTERINFO}, --/* 0x0a */ {}, -+/* 0x0a */ {dxgkio_query_vidmem_info, 
LX_DXQUERYVIDEOMEMORYINFO}, - /* 0x0b */ {}, - /* 0x0c */ {}, - /* 0x0d */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -897,6 +897,17 @@ enum d3dkmt_memory_segment_group { - _D3DKMT_MEMORY_SEGMENT_GROUP_NON_LOCAL = 1 - }; - -+struct d3dkmt_queryvideomemoryinfo { -+ __u64 process; -+ struct d3dkmthandle adapter; -+ enum d3dkmt_memory_segment_group memory_segment_group; -+ __u64 budget; -+ __u64 current_usage; -+ __u64 current_reservation; -+ __u64 available_for_reservation; -+ __u32 physical_adapter_index; -+}; -+ - struct d3dkmt_adaptertype { - union { - struct { -@@ -1204,6 +1215,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x07, struct d3dkmt_createpagingqueue) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) -+#define LX_DXQUERYVIDEOMEMORYINFO \ -+ _IOWR(0x47, 0x0a, struct d3dkmt_queryvideomemoryinfo) - #define LX_DXGETDEVICESTATE \ - _IOWR(0x47, 0x0e, struct d3dkmt_getdevicestate) - #define LX_DXSUBMITCOMMAND \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1688-drivers-hv-dxgkrnl-The-escape-ioctl.patch b/patch/kernel/archive/wsl2-arm64-6.6/1688-drivers-hv-dxgkrnl-The-escape-ioctl.patch deleted file mode 100644 index ecb89843272b..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1688-drivers-hv-dxgkrnl-The-escape-ioctl.patch +++ /dev/null @@ -1,305 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 18 Jan 2022 15:50:30 -0800 -Subject: drivers: hv: dxgkrnl: The escape ioctl - -Implement the escape ioctl (LX_DXESCAPE). - -This ioctl is used to send/receive private data between user mode -compute device driver (guest) and kernel mode compute device -driver (host). It allows the user mode driver to extend the virtual -compute device API. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 3 + - drivers/hv/dxgkrnl/dxgvmbus.c | 75 +++++++++- - drivers/hv/dxgkrnl/dxgvmbus.h | 12 ++ - drivers/hv/dxgkrnl/ioctl.c | 42 +++++- - include/uapi/misc/d3dkmthk.h | 41 +++++ - 5 files changed, 167 insertions(+), 6 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -894,6 +894,9 @@ int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryallocationresidency - *args); -+int dxgvmb_send_escape(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_escape *args); - int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryvideomemoryinfo *args, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1925,6 +1925,70 @@ int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_escape(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_escape *args) -+{ -+ int ret; -+ struct dxgkvmb_command_escape *command = NULL; -+ u32 cmd_size = sizeof(*command); -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ if (args->priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ cmd_size = cmd_size - sizeof(args->priv_drv_data[0]) + -+ args->priv_drv_data_size; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_ESCAPE, -+ process->host_handle); -+ command->adapter = args->adapter; -+ command->device 
= args->device; -+ command->type = args->type; -+ command->flags = args->flags; -+ command->priv_drv_data_size = args->priv_drv_data_size; -+ command->context = args->context; -+ if (args->priv_drv_data_size) { -+ ret = copy_from_user(command->priv_drv_data, -+ args->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy priv data"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ command->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ if (args->priv_drv_data_size) { -+ ret = copy_to_user(args->priv_drv_data, -+ command->priv_drv_data, -+ args->priv_drv_data_size); -+ if (ret) { -+ DXG_ERR("failed to copy priv data"); -+ ret = -EINVAL; -+ } -+ } -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryvideomemoryinfo *args, -@@ -1955,14 +2019,14 @@ int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - ret = copy_to_user(&output->budget, &result.budget, - sizeof(output->budget)); - if (ret) { -- pr_err("%s failed to copy budget", __func__); -+ DXG_ERR("failed to copy budget"); - ret = -EINVAL; - goto cleanup; - } - ret = copy_to_user(&output->current_usage, &result.current_usage, - sizeof(output->current_usage)); - if (ret) { -- pr_err("%s failed to copy current usage", __func__); -+ DXG_ERR("failed to copy current usage"); - ret = -EINVAL; - goto cleanup; - } -@@ -1970,7 +2034,7 @@ int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - &result.current_reservation, - sizeof(output->current_reservation)); - if (ret) { -- pr_err("%s failed to copy reservation", __func__); -+ DXG_ERR("failed to copy reservation"); - ret = -EINVAL; - goto cleanup; - } -@@ -1978,14 +2042,14 @@ int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - 
&result.available_for_reservation, - sizeof(output->available_for_reservation)); - if (ret) { -- pr_err("%s failed to copy avail reservation", __func__); -+ DXG_ERR("failed to copy avail reservation"); - ret = -EINVAL; - } - - cleanup: - free_message(&msg, process); - if (ret) -- dev_dbg(DXGDEV, "err: %d", ret); -+ DXG_TRACE("err: %d", ret); - return ret; - } - -@@ -3152,3 +3216,4 @@ int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, - DXG_TRACE("err: %d", ret); - return ret; - } -+ -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -664,6 +664,18 @@ struct dxgkvmb_command_queryallocationresidency_return { - /* d3dkmt_allocationresidencystatus[NumAllocations] */ - }; - -+/* Returns only private data */ -+struct dxgkvmb_command_escape { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle adapter; -+ struct d3dkmthandle device; -+ enum d3dkmt_escapetype type; -+ struct d3dddi_escapeflags flags; -+ u32 priv_drv_data_size; -+ struct d3dkmthandle context; -+ u8 priv_drv_data[1]; -+}; -+ - struct dxgkvmb_command_queryvideomemoryinfo { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmthandle adapter; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3547,6 +3547,46 @@ dxgkio_flush_heap_transitions(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_escape(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_escape args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ bool adapter_locked = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ 
ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ adapter_locked = true; -+ -+ args.adapter = adapter->host_handle; -+ ret = dxgvmb_send_escape(process, adapter, &args); -+ -+cleanup: -+ -+ if (adapter_locked) -+ dxgadapter_release_lock_shared(adapter); -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_query_vidmem_info(struct dxgprocess *process, void *__user inargs) - { -@@ -4338,7 +4378,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x0a */ {dxgkio_query_vidmem_info, LX_DXQUERYVIDEOMEMORYINFO}, - /* 0x0b */ {}, - /* 0x0c */ {}, --/* 0x0d */ {}, -+/* 0x0d */ {dxgkio_escape, LX_DXESCAPE}, - /* 0x0e */ {dxgkio_get_device_state, LX_DXGETDEVICESTATE}, - /* 0x0f */ {dxgkio_submit_command, LX_DXSUBMITCOMMAND}, - /* 0x10 */ {dxgkio_create_sync_object, LX_DXCREATESYNCHRONIZATIONOBJECT}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -236,6 +236,45 @@ struct d3dddi_destroypagingqueue { - struct d3dkmthandle paging_queue; - }; - -+enum d3dkmt_escapetype { -+ _D3DKMT_ESCAPE_DRIVERPRIVATE = 0, -+ _D3DKMT_ESCAPE_VIDMM = 1, -+ _D3DKMT_ESCAPE_VIDSCH = 3, -+ _D3DKMT_ESCAPE_DEVICE = 4, -+ _D3DKMT_ESCAPE_DRT_TEST = 8, -+}; -+ -+struct d3dddi_escapeflags { -+ union { -+ struct { -+ __u32 hardware_access:1; -+ __u32 device_status_query:1; -+ __u32 change_frame_latency:1; -+ __u32 no_adapter_synchronization:1; -+ __u32 reserved:1; -+ __u32 virtual_machine_data:1; -+ __u32 driver_known_escape:1; -+ __u32 driver_common_escape:1; -+ __u32 reserved2:24; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_escape { -+ struct d3dkmthandle adapter; -+ struct d3dkmthandle device; -+ enum d3dkmt_escapetype type; -+ struct 
d3dddi_escapeflags flags; -+#ifdef __KERNEL__ -+ void *priv_drv_data; -+#else -+ __u64 priv_drv_data; -+#endif -+ __u32 priv_drv_data_size; -+ struct d3dkmthandle context; -+}; -+ - enum dxgk_render_pipeline_stage { - _DXGK_RENDER_PIPELINE_STAGE_UNKNOWN = 0, - _DXGK_RENDER_PIPELINE_STAGE_INPUT_ASSEMBLER = 1, -@@ -1217,6 +1256,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) - #define LX_DXQUERYVIDEOMEMORYINFO \ - _IOWR(0x47, 0x0a, struct d3dkmt_queryvideomemoryinfo) -+#define LX_DXESCAPE \ -+ _IOWR(0x47, 0x0d, struct d3dkmt_escape) - #define LX_DXGETDEVICESTATE \ - _IOWR(0x47, 0x0e, struct d3dkmt_getdevicestate) - #define LX_DXSUBMITCOMMAND \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1689-drivers-hv-dxgkrnl-Ioctl-to-put-device-to-error-state.patch b/patch/kernel/archive/wsl2-arm64-6.6/1689-drivers-hv-dxgkrnl-Ioctl-to-put-device-to-error-state.patch deleted file mode 100644 index 89911a1cfc92..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1689-drivers-hv-dxgkrnl-Ioctl-to-put-device-to-error-state.patch +++ /dev/null @@ -1,180 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 9 Feb 2022 10:57:57 -0800 -Subject: drivers: hv: dxgkrnl: Ioctl to put device to error state - -Implement the ioctl to put the virtual compute device to the error -state (LX_DXMARKDEVICEASERROR). - -This ioctl is used by the user mode driver when it detects an -unrecoverable error condition. - -When a compute device is put to the error state, all subsequent -ioctl calls to the device will fail. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 3 + - drivers/hv/dxgkrnl/dxgvmbus.c | 25 ++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 5 ++ - drivers/hv/dxgkrnl/ioctl.c | 38 +++++++++- - include/uapi/misc/d3dkmthk.h | 12 +++ - 5 files changed, 82 insertions(+), 1 deletion(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -856,6 +856,9 @@ int dxgvmb_send_update_alloc_property(struct dxgprocess *process, - struct d3dddi_updateallocproperty *args, - struct d3dddi_updateallocproperty *__user - inargs); -+int dxgvmb_send_mark_device_as_error(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_markdeviceaserror *args); - int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_setallocationpriority *a); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -2730,6 +2730,31 @@ int dxgvmb_send_update_alloc_property(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_mark_device_as_error(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_markdeviceaserror *args) -+{ -+ struct dxgkvmb_command_markdeviceaserror *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_MARKDEVICEASERROR, -+ process->host_handle); -+ command->args = *args; -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return 
ret; -+} -+ - int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_setallocationpriority *args) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -627,6 +627,11 @@ struct dxgkvmb_command_updateallocationproperty_return { - struct ntstatus status; - }; - -+struct dxgkvmb_command_markdeviceaserror { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_markdeviceaserror args; -+}; -+ - /* Returns ntstatus */ - struct dxgkvmb_command_changevideomemoryreservation { - struct dxgkvmb_command_vgpu_to_host hdr; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3341,6 +3341,42 @@ dxgkio_update_alloc_property(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_mark_device_as_error(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_markdeviceaserror args; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ device->execution_state = _D3DKMT_DEVICEEXECUTION_RESET; -+ ret = dxgvmb_send_mark_device_as_error(process, adapter, &args); -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; 
-+} -+ - static int - dxgkio_query_alloc_residency(struct dxgprocess *process, void *__user inargs) - { -@@ -4404,7 +4440,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x23 */ {}, - /* 0x24 */ {}, - /* 0x25 */ {dxgkio_lock2, LX_DXLOCK2}, --/* 0x26 */ {}, -+/* 0x26 */ {dxgkio_mark_device_as_error, LX_DXMARKDEVICEASERROR}, - /* 0x27 */ {}, - /* 0x28 */ {}, - /* 0x29 */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -790,6 +790,16 @@ struct d3dkmt_unlock2 { - struct d3dkmthandle allocation; - }; - -+enum d3dkmt_device_error_reason { -+ _D3DKMT_DEVICE_ERROR_REASON_GENERIC = 0x80000000, -+ _D3DKMT_DEVICE_ERROR_REASON_DRIVER_ERROR = 0x80000006, -+}; -+ -+struct d3dkmt_markdeviceaserror { -+ struct d3dkmthandle device; -+ enum d3dkmt_device_error_reason reason; -+}; -+ - enum d3dkmt_standardallocationtype { - _D3DKMT_STANDARDALLOCATIONTYPE_EXISTINGHEAP = 1, - _D3DKMT_STANDARDALLOCATIONTYPE_CROSSADAPTER = 2, -@@ -1290,6 +1300,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x1f, struct d3dkmt_flushheaptransitions) - #define LX_DXLOCK2 \ - _IOWR(0x47, 0x25, struct d3dkmt_lock2) -+#define LX_DXMARKDEVICEASERROR \ -+ _IOWR(0x47, 0x26, struct d3dkmt_markdeviceaserror) - #define LX_DXQUERYALLOCATIONRESIDENCY \ - _IOWR(0x47, 0x2a, struct d3dkmt_queryallocationresidency) - #define LX_DXSETALLOCATIONPRIORITY \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1690-drivers-hv-dxgkrnl-Ioctls-to-query-statistics-and-clock-calibration.patch b/patch/kernel/archive/wsl2-arm64-6.6/1690-drivers-hv-dxgkrnl-Ioctls-to-query-statistics-and-clock-calibration.patch deleted file mode 100644 index 61dc6cd5c752..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1690-drivers-hv-dxgkrnl-Ioctls-to-query-statistics-and-clock-calibration.patch +++ /dev/null @@ -1,423 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 
17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 9 Feb 2022 11:01:57 -0800 -Subject: drivers: hv: dxgkrnl: Ioctls to query statistics and clock - calibration - -Implement ioctls to query statistics from the VGPU device -(LX_DXQUERYSTATISTICS) and to query clock calibration -(LX_DXQUERYCLOCKCALIBRATION). - -The LX_DXQUERYSTATISTICS ioctl is used to query various statistics from -the compute device on the host. - -The LX_DXQUERYCLOCKCALIBRATION ioctl queries the compute device clock -and is used for performance monitoring. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 8 + - drivers/hv/dxgkrnl/dxgvmbus.c | 77 +++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 21 ++ - drivers/hv/dxgkrnl/ioctl.c | 111 +++++++++- - include/uapi/misc/d3dkmthk.h | 62 ++++++ - 5 files changed, 277 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -885,6 +885,11 @@ int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_submitcommandtohwqueue *a); -+int dxgvmb_send_query_clock_calibration(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryclockcalibration *a, -+ struct d3dkmt_queryclockcalibration -+ *__user inargs); - int dxgvmb_send_flush_heap_transitions(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_flushheaptransitions *arg); -@@ -929,6 +934,9 @@ int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - void *prive_alloc_data, - u32 *res_priv_data_size, - void *priv_res_data); -+int dxgvmb_send_query_statistics(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_querystatistics *args); - int dxgvmb_send_async_msg(struct dxgvmbuschannel 
*channel, - void *command, - u32 cmd_size); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1829,6 +1829,48 @@ int dxgvmb_send_destroy_allocation(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_query_clock_calibration(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_queryclockcalibration -+ *args, -+ struct d3dkmt_queryclockcalibration -+ *__user inargs) -+{ -+ struct dxgkvmb_command_queryclockcalibration *command; -+ struct dxgkvmb_command_queryclockcalibration_return result; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_QUERYCLOCKCALIBRATION, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) -+ goto cleanup; -+ ret = copy_to_user(&inargs->clock_data, &result.clock_data, -+ sizeof(result.clock_data)); -+ if (ret) { -+ pr_err("%s failed to copy clock data", __func__); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ ret = ntstatus2int(result.status); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_flush_heap_transitions(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_flushheaptransitions *args) -@@ -3242,3 +3284,38 @@ int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_query_statistics(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_querystatistics *args) -+{ -+ struct dxgkvmb_command_querystatistics *command; -+ struct dxgkvmb_command_querystatistics_return *result; -+ int ret; -+ struct 
dxgvmbusmsgres msg = {.hdr = NULL}; -+ -+ ret = init_message_res(&msg, adapter, process, sizeof(*command), -+ sizeof(*result)); -+ if (ret) -+ goto cleanup; -+ command = msg.msg; -+ result = msg.res; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_QUERYSTATISTICS, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ result, msg.res_size); -+ if (ret < 0) -+ goto cleanup; -+ -+ args->result = result->result; -+ ret = ntstatus2int(result->status); -+ -+cleanup: -+ free_message((struct dxgvmbusmsg *)&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -372,6 +372,16 @@ struct dxgkvmb_command_flushheaptransitions { - struct dxgkvmb_command_vgpu_to_host hdr; - }; - -+struct dxgkvmb_command_queryclockcalibration { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_queryclockcalibration args; -+}; -+ -+struct dxgkvmb_command_queryclockcalibration_return { -+ struct ntstatus status; -+ struct dxgk_gpuclockdata clock_data; -+}; -+ - struct dxgkvmb_command_createallocation_allocinfo { - u32 flags; - u32 priv_drv_data_size; -@@ -408,6 +418,17 @@ struct dxgkvmb_command_openresource_return { - /* struct d3dkmthandle allocation[allocation_count]; */ - }; - -+struct dxgkvmb_command_querystatistics { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_querystatistics args; -+}; -+ -+struct dxgkvmb_command_querystatistics_return { -+ struct ntstatus status; -+ u32 reserved; -+ struct d3dkmt_querystatistics_result result; -+}; -+ - struct dxgkvmb_command_getstandardallocprivdata { - struct dxgkvmb_command_vgpu_to_host hdr; - enum d3dkmdt_standardallocationtype alloc_type; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 
111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -149,6 +149,65 @@ static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, - return ret; - } - -+static int dxgkio_query_statistics(struct dxgprocess *process, -+ void __user *inargs) -+{ -+ struct d3dkmt_querystatistics *args; -+ int ret; -+ struct dxgadapter *entry; -+ struct dxgadapter *adapter = NULL; -+ struct winluid tmp; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ args = vzalloc(sizeof(struct d3dkmt_querystatistics)); -+ if (args == NULL) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ ret = copy_from_user(args, inargs, sizeof(*args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ dxgglobal_acquire_adapter_list_lock(DXGLOCK_SHARED); -+ list_for_each_entry(entry, &dxgglobal->adapter_list_head, -+ adapter_list_entry) { -+ if (dxgadapter_acquire_lock_shared(entry) == 0) { -+ if (*(u64 *) &entry->luid == -+ *(u64 *) &args->adapter_luid) { -+ adapter = entry; -+ break; -+ } -+ dxgadapter_release_lock_shared(entry); -+ } -+ } -+ dxgglobal_release_adapter_list_lock(DXGLOCK_SHARED); -+ if (adapter) { -+ tmp = args->adapter_luid; -+ args->adapter_luid = adapter->host_adapter_luid; -+ ret = dxgvmb_send_query_statistics(process, adapter, args); -+ if (ret >= 0) { -+ args->adapter_luid = tmp; -+ ret = copy_to_user(inargs, args, sizeof(*args)); -+ if (ret) { -+ DXG_ERR("failed to copy args"); -+ ret = -EINVAL; -+ } -+ } -+ dxgadapter_release_lock_shared(adapter); -+ } -+ -+cleanup: -+ if (args) -+ vfree(args); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkp_enum_adapters(struct dxgprocess *process, - union d3dkmt_enumadapters_filter filter, -@@ -3536,6 +3595,54 @@ dxgkio_change_vidmem_reservation(struct dxgprocess *process, void *__user inargs - return ret; - } - -+static int -+dxgkio_query_clock_calibration(struct dxgprocess *process, void *__user 
inargs) -+{ -+ struct d3dkmt_queryclockcalibration args; -+ int ret; -+ struct dxgadapter *adapter = NULL; -+ bool adapter_locked = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ adapter_locked = true; -+ -+ args.adapter = adapter->host_handle; -+ ret = dxgvmb_send_query_clock_calibration(process, adapter, -+ &args, inargs); -+ if (ret < 0) -+ goto cleanup; -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (adapter_locked) -+ dxgadapter_release_lock_shared(adapter); -+ if (adapter) -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ return ret; -+} -+ - static int - dxgkio_flush_heap_transitions(struct dxgprocess *process, void *__user inargs) - { -@@ -4470,14 +4577,14 @@ static struct ioctl_desc ioctls[] = { - /* 0x3b */ {dxgkio_wait_sync_object_gpu, - LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU}, - /* 0x3c */ {dxgkio_get_allocation_priority, LX_DXGETALLOCATIONPRIORITY}, --/* 0x3d */ {}, -+/* 0x3d */ {dxgkio_query_clock_calibration, LX_DXQUERYCLOCKCALIBRATION}, - /* 0x3e */ {dxgkio_enum_adapters3, LX_DXENUMADAPTERS3}, - /* 0x3f */ {dxgkio_share_objects, LX_DXSHAREOBJECTS}, - /* 0x40 */ {dxgkio_open_sync_object_nt, LX_DXOPENSYNCOBJECTFROMNTHANDLE2}, - /* 0x41 */ {dxgkio_query_resource_info_nt, - LX_DXQUERYRESOURCEINFOFROMNTHANDLE}, - /* 0x42 */ {dxgkio_open_resource_nt, LX_DXOPENRESOURCEFROMNTHANDLE}, --/* 0x43 */ {}, -+/* 0x43 */ {dxgkio_query_statistics, LX_DXQUERYSTATISTICS}, - /* 0x44 */ {dxgkio_share_object_with_host, LX_DXSHAREOBJECTWITHHOST}, - /* 0x45 */ {}, - }; -diff --git 
a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -996,6 +996,34 @@ struct d3dkmt_queryadapterinfo { - __u32 private_data_size; - }; - -+#pragma pack(push, 1) -+ -+struct dxgk_gpuclockdata_flags { -+ union { -+ struct { -+ __u32 context_management_processor:1; -+ __u32 reserved:31; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct dxgk_gpuclockdata { -+ __u64 gpu_frequency; -+ __u64 gpu_clock_counter; -+ __u64 cpu_clock_counter; -+ struct dxgk_gpuclockdata_flags flags; -+} __packed; -+ -+struct d3dkmt_queryclockcalibration { -+ struct d3dkmthandle adapter; -+ __u32 node_ordinal; -+ __u32 physical_adapter_index; -+ struct dxgk_gpuclockdata clock_data; -+}; -+ -+#pragma pack(pop) -+ - struct d3dkmt_flushheaptransitions { - struct d3dkmthandle adapter; - }; -@@ -1238,6 +1266,36 @@ struct d3dkmt_enumadapters3 { - #endif - }; - -+enum d3dkmt_querystatistics_type { -+ _D3DKMT_QUERYSTATISTICS_ADAPTER = 0, -+ _D3DKMT_QUERYSTATISTICS_PROCESS = 1, -+ _D3DKMT_QUERYSTATISTICS_PROCESS_ADAPTER = 2, -+ _D3DKMT_QUERYSTATISTICS_SEGMENT = 3, -+ _D3DKMT_QUERYSTATISTICS_PROCESS_SEGMENT = 4, -+ _D3DKMT_QUERYSTATISTICS_NODE = 5, -+ _D3DKMT_QUERYSTATISTICS_PROCESS_NODE = 6, -+ _D3DKMT_QUERYSTATISTICS_VIDPNSOURCE = 7, -+ _D3DKMT_QUERYSTATISTICS_PROCESS_VIDPNSOURCE = 8, -+ _D3DKMT_QUERYSTATISTICS_PROCESS_SEGMENT_GROUP = 9, -+ _D3DKMT_QUERYSTATISTICS_PHYSICAL_ADAPTER = 10, -+}; -+ -+struct d3dkmt_querystatistics_result { -+ char size[0x308]; -+}; -+ -+struct d3dkmt_querystatistics { -+ union { -+ struct { -+ enum d3dkmt_querystatistics_type type; -+ struct winluid adapter_luid; -+ __u64 process; -+ struct d3dkmt_querystatistics_result result; -+ }; -+ char size[0x328]; -+ }; -+}; -+ - struct d3dkmt_shareobjectwithhost { - struct d3dkmthandle device_handle; - struct d3dkmthandle object_handle; -@@ -1328,6 +1386,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 
0x3b, struct d3dkmt_waitforsynchronizationobjectfromgpu) - #define LX_DXGETALLOCATIONPRIORITY \ - _IOWR(0x47, 0x3c, struct d3dkmt_getallocationpriority) -+#define LX_DXQUERYCLOCKCALIBRATION \ -+ _IOWR(0x47, 0x3d, struct d3dkmt_queryclockcalibration) - #define LX_DXENUMADAPTERS3 \ - _IOWR(0x47, 0x3e, struct d3dkmt_enumadapters3) - #define LX_DXSHAREOBJECTS \ -@@ -1338,6 +1398,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x41, struct d3dkmt_queryresourceinfofromnthandle) - #define LX_DXOPENRESOURCEFROMNTHANDLE \ - _IOWR(0x47, 0x42, struct d3dkmt_openresourcefromnthandle) -+#define LX_DXQUERYSTATISTICS \ -+ _IOWR(0x47, 0x43, struct d3dkmt_querystatistics) - #define LX_DXSHAREOBJECTWITHHOST \ - _IOWR(0x47, 0x44, struct d3dkmt_shareobjectwithhost) - --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1691-drivers-hv-dxgkrnl-Offer-and-reclaim-allocations.patch b/patch/kernel/archive/wsl2-arm64-6.6/1691-drivers-hv-dxgkrnl-Offer-and-reclaim-allocations.patch deleted file mode 100644 index 87535045c65f..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1691-drivers-hv-dxgkrnl-Offer-and-reclaim-allocations.patch +++ /dev/null @@ -1,466 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 18 Jan 2022 15:01:55 -0800 -Subject: drivers: hv: dxgkrnl: Offer and reclaim allocations - -Implement ioctls to offer and reclaim compute device allocations: - - LX_DXOFFERALLOCATIONS, - - LX_DXRECLAIMALLOCATIONS2 - -When a user mode driver (UMD) does not need to access an allocation, -it can "offer" it by issuing the LX_DXOFFERALLOCATIONS ioctl. This -means that the allocation is not in use and its local device memory -could be evicted. The freed space could be given to another allocation. -When the allocation is again needed, the UMD can attempt to"reclaim" -the allocation by issuing the LX_DXRECLAIMALLOCATIONS2 ioctl. 
If the -allocation is still not evicted, the reclaim operation succeeds and no -other action is required. If the reclaim operation fails, the caller -must restore the content of the allocation before it can be used by -the device. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 8 + - drivers/hv/dxgkrnl/dxgvmbus.c | 124 +++++++++- - drivers/hv/dxgkrnl/dxgvmbus.h | 27 ++ - drivers/hv/dxgkrnl/ioctl.c | 117 ++++++++- - include/uapi/misc/d3dkmthk.h | 67 +++++ - 5 files changed, 340 insertions(+), 3 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -865,6 +865,14 @@ int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, - int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_getallocationpriority *a); -+int dxgvmb_send_offer_allocations(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_offerallocations *args); -+int dxgvmb_send_reclaim_allocations(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle device, -+ struct d3dkmt_reclaimallocations2 *args, -+ u64 __user *paging_fence_value); - int dxgvmb_send_change_vidmem_reservation(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmthandle other_process, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1858,7 +1858,7 @@ int dxgvmb_send_query_clock_calibration(struct dxgprocess *process, - ret = copy_to_user(&inargs->clock_data, &result.clock_data, - sizeof(result.clock_data)); - if (ret) { -- pr_err("%s failed to copy clock data", __func__); -+ DXG_ERR("failed to copy clock data"); - ret = -EINVAL; - 
goto cleanup; - } -@@ -2949,6 +2949,128 @@ int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_offer_allocations(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_offerallocations *args) -+{ -+ struct dxgkvmb_command_offerallocations *command; -+ int ret = -EINVAL; -+ u32 alloc_size = sizeof(struct d3dkmthandle) * args->allocation_count; -+ u32 cmd_size = sizeof(struct dxgkvmb_command_offerallocations) + -+ alloc_size - sizeof(struct d3dkmthandle); -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_OFFERALLOCATIONS, -+ process->host_handle); -+ command->flags = args->flags; -+ command->priority = args->priority; -+ command->device = args->device; -+ command->allocation_count = args->allocation_count; -+ if (args->resources) { -+ command->resources = true; -+ ret = copy_from_user(command->allocations, args->resources, -+ alloc_size); -+ } else { -+ ret = copy_from_user(command->allocations, -+ args->allocations, alloc_size); -+ } -+ if (ret) { -+ DXG_ERR("failed to copy input handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ pr_debug("err: %s %d", __func__, ret); -+ return ret; -+} -+ -+int dxgvmb_send_reclaim_allocations(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle device, -+ struct d3dkmt_reclaimallocations2 *args, -+ u64 __user *paging_fence_value) -+{ -+ struct dxgkvmb_command_reclaimallocations *command; -+ struct dxgkvmb_command_reclaimallocations_return *result; -+ int ret; -+ u32 alloc_size = sizeof(struct d3dkmthandle) * args->allocation_count; -+ u32 cmd_size = sizeof(struct dxgkvmb_command_reclaimallocations) + -+ alloc_size - 
sizeof(struct d3dkmthandle); -+ u32 result_size = sizeof(*result); -+ struct dxgvmbusmsgres msg = {.hdr = NULL}; -+ -+ if (args->results) -+ result_size += (args->allocation_count - 1) * -+ sizeof(enum d3dddi_reclaim_result); -+ -+ ret = init_message_res(&msg, adapter, process, cmd_size, result_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ result = msg.res; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_RECLAIMALLOCATIONS, -+ process->host_handle); -+ command->device = device; -+ command->paging_queue = args->paging_queue; -+ command->allocation_count = args->allocation_count; -+ command->write_results = args->results != NULL; -+ if (args->resources) { -+ command->resources = true; -+ ret = copy_from_user(command->allocations, args->resources, -+ alloc_size); -+ } else { -+ ret = copy_from_user(command->allocations, -+ args->allocations, alloc_size); -+ } -+ if (ret) { -+ DXG_ERR("failed to copy input handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ result, msg.res_size); -+ if (ret < 0) -+ goto cleanup; -+ ret = copy_to_user(paging_fence_value, -+ &result->paging_fence_value, sizeof(u64)); -+ if (ret) { -+ DXG_ERR("failed to copy paging fence"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = ntstatus2int(result->status); -+ if (NT_SUCCESS(result->status) && args->results) { -+ ret = copy_to_user(args->results, result->discarded, -+ sizeof(result->discarded[0]) * -+ args->allocation_count); -+ if (ret) { -+ DXG_ERR("failed to copy results"); -+ ret = -EINVAL; -+ } -+ } -+ -+cleanup: -+ free_message((struct dxgvmbusmsg *)&msg, process); -+ if (ret) -+ pr_debug("err: %s %d", __func__, ret); -+ return ret; -+} -+ - int dxgvmb_send_change_vidmem_reservation(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmthandle other_process, -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 
111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -653,6 +653,33 @@ struct dxgkvmb_command_markdeviceaserror { - struct d3dkmt_markdeviceaserror args; - }; - -+/* Returns ntstatus */ -+struct dxgkvmb_command_offerallocations { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ u32 allocation_count; -+ enum d3dkmt_offer_priority priority; -+ struct d3dkmt_offer_flags flags; -+ bool resources; -+ struct d3dkmthandle allocations[1]; -+}; -+ -+struct dxgkvmb_command_reclaimallocations { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle paging_queue; -+ u32 allocation_count; -+ bool resources; -+ bool write_results; -+ struct d3dkmthandle allocations[1]; -+}; -+ -+struct dxgkvmb_command_reclaimallocations_return { -+ u64 paging_fence_value; -+ struct ntstatus status; -+ enum d3dddi_reclaim_result discarded[1]; -+}; -+ - /* Returns ntstatus */ - struct dxgkvmb_command_changevideomemoryreservation { - struct dxgkvmb_command_vgpu_to_host hdr; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -1961,6 +1961,119 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_offer_allocations(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_offerallocations args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.allocation_count > D3DKMT_MAKERESIDENT_ALLOC_MAX || -+ args.allocation_count == 0) { -+ DXG_ERR("invalid number of allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if ((args.resources == NULL) == (args.allocations == 
NULL)) { -+ DXG_ERR("invalid pointer to resources/allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_offer_allocations(process, adapter, &args); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_reclaim_allocations(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_reclaimallocations2 args; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dkmt_reclaimallocations2 * __user in_args = inargs; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.allocation_count > D3DKMT_MAKERESIDENT_ALLOC_MAX || -+ args.allocation_count == 0) { -+ DXG_ERR("invalid number of allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if ((args.resources == NULL) == (args.allocations == NULL)) { -+ DXG_ERR("invalid pointer to resources/allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.paging_queue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_reclaim_allocations(process, adapter, -+ device->handle, &args, -+ &in_args->paging_fence_value); -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ 
kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_submit_command(struct dxgprocess *process, void *__user inargs) - { -@@ -4548,12 +4661,12 @@ static struct ioctl_desc ioctls[] = { - /* 0x24 */ {}, - /* 0x25 */ {dxgkio_lock2, LX_DXLOCK2}, - /* 0x26 */ {dxgkio_mark_device_as_error, LX_DXMARKDEVICEASERROR}, --/* 0x27 */ {}, -+/* 0x27 */ {dxgkio_offer_allocations, LX_DXOFFERALLOCATIONS}, - /* 0x28 */ {}, - /* 0x29 */ {}, - /* 0x2a */ {dxgkio_query_alloc_residency, LX_DXQUERYALLOCATIONRESIDENCY}, - /* 0x2b */ {}, --/* 0x2c */ {}, -+/* 0x2c */ {dxgkio_reclaim_allocations, LX_DXRECLAIMALLOCATIONS2}, - /* 0x2d */ {}, - /* 0x2e */ {dxgkio_set_allocation_priority, LX_DXSETALLOCATIONPRIORITY}, - /* 0x2f */ {}, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -61,6 +61,7 @@ struct winluid { - #define D3DDDI_MAX_WRITTEN_PRIMARIES 16 - - #define D3DKMT_CREATEALLOCATION_MAX 1024 -+#define D3DKMT_MAKERESIDENT_ALLOC_MAX (1024 * 10) - #define D3DKMT_ADAPTERS_MAX 64 - #define D3DDDI_MAX_BROADCAST_CONTEXT 64 - #define D3DDDI_MAX_OBJECT_WAITED_ON 32 -@@ -1087,6 +1088,68 @@ struct d3dddi_updateallocproperty { - }; - }; - -+enum d3dkmt_offer_priority { -+ _D3DKMT_OFFER_PRIORITY_LOW = 1, -+ _D3DKMT_OFFER_PRIORITY_NORMAL = 2, -+ _D3DKMT_OFFER_PRIORITY_HIGH = 3, -+ _D3DKMT_OFFER_PRIORITY_AUTO = 4, -+}; -+ -+struct d3dkmt_offer_flags { -+ union { -+ struct { -+ __u32 offer_immediately:1; -+ __u32 allow_decommit:1; -+ __u32 reserved:30; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_offerallocations { -+ struct d3dkmthandle device; -+ __u32 reserved; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *resources; -+ const struct d3dkmthandle *allocations; -+#else -+ __u64 resources; -+ __u64 allocations; -+#endif -+ __u32 allocation_count; -+ enum 
d3dkmt_offer_priority priority; -+ struct d3dkmt_offer_flags flags; -+ __u32 reserved1; -+}; -+ -+enum d3dddi_reclaim_result { -+ _D3DDDI_RECLAIM_RESULT_OK = 0, -+ _D3DDDI_RECLAIM_RESULT_DISCARDED = 1, -+ _D3DDDI_RECLAIM_RESULT_NOT_COMMITTED = 2, -+}; -+ -+struct d3dkmt_reclaimallocations2 { -+ struct d3dkmthandle paging_queue; -+ __u32 allocation_count; -+#ifdef __KERNEL__ -+ struct d3dkmthandle *resources; -+ struct d3dkmthandle *allocations; -+#else -+ __u64 resources; -+ __u64 allocations; -+#endif -+ union { -+#ifdef __KERNEL__ -+ __u32 *discarded; -+ enum d3dddi_reclaim_result *results; -+#else -+ __u64 discarded; -+ __u64 results; -+#endif -+ }; -+ __u64 paging_fence_value; -+}; -+ - struct d3dkmt_changevideomemoryreservation { - __u64 process; - struct d3dkmthandle adapter; -@@ -1360,8 +1423,12 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x25, struct d3dkmt_lock2) - #define LX_DXMARKDEVICEASERROR \ - _IOWR(0x47, 0x26, struct d3dkmt_markdeviceaserror) -+#define LX_DXOFFERALLOCATIONS \ -+ _IOWR(0x47, 0x27, struct d3dkmt_offerallocations) - #define LX_DXQUERYALLOCATIONRESIDENCY \ - _IOWR(0x47, 0x2a, struct d3dkmt_queryallocationresidency) -+#define LX_DXRECLAIMALLOCATIONS2 \ -+ _IOWR(0x47, 0x2c, struct d3dkmt_reclaimallocations2) - #define LX_DXSETALLOCATIONPRIORITY \ - _IOWR(0x47, 0x2e, struct d3dkmt_setallocationpriority) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1692-drivers-hv-dxgkrnl-Ioctls-to-manage-scheduling-priority.patch b/patch/kernel/archive/wsl2-arm64-6.6/1692-drivers-hv-dxgkrnl-Ioctls-to-manage-scheduling-priority.patch deleted file mode 100644 index 4ff04c894bd6..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1692-drivers-hv-dxgkrnl-Ioctls-to-manage-scheduling-priority.patch +++ /dev/null @@ -1,427 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Fri, 14 Jan 2022 17:57:41 -0800 -Subject: drivers: 
hv: dxgkrnl: Ioctls to manage scheduling priority - -Implement iocts to manage compute device scheduling priority: - - LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY - - LX_DXGETCONTEXTSCHEDULINGPRIORITY - - LX_DXSETCONTEXTINPROCESSSCHEDULINGPRIORITY - - LX_DXSETCONTEXTSCHEDULINGPRIORITY - -Each compute device execution context has an assigned scheduling -priority. It is used by the compute device scheduler on the host to -pick contexts for execution. There is a global priority and a -priority within a process. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 9 + - drivers/hv/dxgkrnl/dxgvmbus.c | 67 +++- - drivers/hv/dxgkrnl/dxgvmbus.h | 19 + - drivers/hv/dxgkrnl/ioctl.c | 177 +++++++++- - include/uapi/misc/d3dkmthk.h | 28 ++ - 5 files changed, 294 insertions(+), 6 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -865,6 +865,15 @@ int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, - int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_getallocationpriority *a); -+int dxgvmb_send_set_context_sch_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle context, -+ int priority, bool in_process); -+int dxgvmb_send_get_context_sch_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle context, -+ int *priority, -+ bool in_process); - int dxgvmb_send_offer_allocations(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_offerallocations *args); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -2949,6 +2949,69 @@ int 
dxgvmb_send_get_allocation_priority(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_set_context_sch_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle context, -+ int priority, -+ bool in_process) -+{ -+ struct dxgkvmb_command_setcontextschedulingpriority2 *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_SETCONTEXTSCHEDULINGPRIORITY, -+ process->host_handle); -+ command->context = context; -+ command->priority = priority; -+ command->in_process = in_process; -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_get_context_sch_priority(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmthandle context, -+ int *priority, -+ bool in_process) -+{ -+ struct dxgkvmb_command_getcontextschedulingpriority *command; -+ struct dxgkvmb_command_getcontextschedulingpriority_return result = { }; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_GETCONTEXTSCHEDULINGPRIORITY, -+ process->host_handle); -+ command->context = context; -+ command->in_process = in_process; -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret >= 0) { -+ ret = ntstatus2int(result.status); -+ *priority = result.priority; -+ } -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_offer_allocations(struct dxgprocess *process, - struct dxgadapter *adapter, - struct 
d3dkmt_offerallocations *args) -@@ -2991,7 +3054,7 @@ int dxgvmb_send_offer_allocations(struct dxgprocess *process, - cleanup: - free_message(&msg, process); - if (ret) -- pr_debug("err: %s %d", __func__, ret); -+ DXG_TRACE("err: %d", ret); - return ret; - } - -@@ -3067,7 +3130,7 @@ int dxgvmb_send_reclaim_allocations(struct dxgprocess *process, - cleanup: - free_message((struct dxgvmbusmsg *)&msg, process); - if (ret) -- pr_debug("err: %s %d", __func__, ret); -+ DXG_TRACE("err: %d", ret); - return ret; - } - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -331,6 +331,25 @@ struct dxgkvmb_command_getallocationpriority_return { - /* u32 priorities[allocation_count or 1]; */ - }; - -+/* Returns ntstatus */ -+struct dxgkvmb_command_setcontextschedulingpriority2 { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle context; -+ int priority; -+ bool in_process; -+}; -+ -+struct dxgkvmb_command_getcontextschedulingpriority { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle context; -+ bool in_process; -+}; -+ -+struct dxgkvmb_command_getcontextschedulingpriority_return { -+ struct ntstatus status; -+ int priority; -+}; -+ - struct dxgkvmb_command_createdevice { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_createdeviceflags flags; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -3660,6 +3660,171 @@ dxgkio_get_allocation_priority(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+set_context_scheduling_priority(struct dxgprocess *process, -+ struct d3dkmthandle hcontext, -+ int priority, bool in_process) -+{ -+ int ret = 0; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ device = 
dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ hcontext); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ ret = dxgvmb_send_set_context_sch_priority(process, adapter, -+ hcontext, priority, -+ in_process); -+ if (ret < 0) -+ DXG_ERR("send_set_context_scheduling_priority failed"); -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ return ret; -+} -+ -+static int -+dxgkio_set_context_scheduling_priority(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_setcontextschedulingpriority args; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = set_context_scheduling_priority(process, args.context, -+ args.priority, false); -+cleanup: -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+get_context_scheduling_priority(struct dxgprocess *process, -+ struct d3dkmthandle hcontext, -+ int __user *priority, -+ bool in_process) -+{ -+ int ret; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ int pri = 0; -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ hcontext); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ ret = dxgvmb_send_get_context_sch_priority(process, adapter, -+ hcontext, &pri, in_process); -+ if (ret < 0) -+ goto cleanup; -+ ret = copy_to_user(priority, &pri, sizeof(pri)); -+ if (ret) { -+ DXG_ERR("failed to copy priority to user"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ if (adapter) -+ 
dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ return ret; -+} -+ -+static int -+dxgkio_get_context_scheduling_priority(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_getcontextschedulingpriority args; -+ struct d3dkmt_getcontextschedulingpriority __user *input = inargs; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = get_context_scheduling_priority(process, args.context, -+ &input->priority, false); -+cleanup: -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_set_context_process_scheduling_priority(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_setcontextinprocessschedulingpriority args; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = set_context_scheduling_priority(process, args.context, -+ args.priority, true); -+cleanup: -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_get_context_process_scheduling_priority(struct dxgprocess *process, -+ void __user *inargs) -+{ -+ struct d3dkmt_getcontextinprocessschedulingpriority args; -+ int ret; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = get_context_scheduling_priority(process, args.context, -+ &((struct d3dkmt_getcontextinprocessschedulingpriority *) -+ inargs)->priority, true); -+cleanup: -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_change_vidmem_reservation(struct dxgprocess *process, void *__user inargs) - { -@@ -4655,8 +4820,10 @@ static struct ioctl_desc ioctls[] = { - /* 0x1e */ {}, - /* 0x1f 
*/ {dxgkio_flush_heap_transitions, LX_DXFLUSHHEAPTRANSITIONS}, - /* 0x20 */ {}, --/* 0x21 */ {}, --/* 0x22 */ {}, -+/* 0x21 */ {dxgkio_get_context_process_scheduling_priority, -+ LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY}, -+/* 0x22 */ {dxgkio_get_context_scheduling_priority, -+ LX_DXGETCONTEXTSCHEDULINGPRIORITY}, - /* 0x23 */ {}, - /* 0x24 */ {}, - /* 0x25 */ {dxgkio_lock2, LX_DXLOCK2}, -@@ -4669,8 +4836,10 @@ static struct ioctl_desc ioctls[] = { - /* 0x2c */ {dxgkio_reclaim_allocations, LX_DXRECLAIMALLOCATIONS2}, - /* 0x2d */ {}, - /* 0x2e */ {dxgkio_set_allocation_priority, LX_DXSETALLOCATIONPRIORITY}, --/* 0x2f */ {}, --/* 0x30 */ {}, -+/* 0x2f */ {dxgkio_set_context_process_scheduling_priority, -+ LX_DXSETCONTEXTINPROCESSSCHEDULINGPRIORITY}, -+/* 0x30 */ {dxgkio_set_context_scheduling_priority, -+ LX_DXSETCONTEXTSCHEDULINGPRIORITY}, - /* 0x31 */ {dxgkio_signal_sync_object_cpu, - LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU}, - /* 0x32 */ {dxgkio_signal_sync_object_gpu, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -708,6 +708,26 @@ struct d3dkmt_submitcommandtohwqueue { - #endif - }; - -+struct d3dkmt_setcontextschedulingpriority { -+ struct d3dkmthandle context; -+ int priority; -+}; -+ -+struct d3dkmt_setcontextinprocessschedulingpriority { -+ struct d3dkmthandle context; -+ int priority; -+}; -+ -+struct d3dkmt_getcontextschedulingpriority { -+ struct d3dkmthandle context; -+ int priority; -+}; -+ -+struct d3dkmt_getcontextinprocessschedulingpriority { -+ struct d3dkmthandle context; -+ int priority; -+}; -+ - struct d3dkmt_setallocationpriority { - struct d3dkmthandle device; - struct d3dkmthandle resource; -@@ -1419,6 +1439,10 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) - #define LX_DXFLUSHHEAPTRANSITIONS \ - _IOWR(0x47, 0x1f, struct 
d3dkmt_flushheaptransitions) -+#define LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY \ -+ _IOWR(0x47, 0x21, struct d3dkmt_getcontextinprocessschedulingpriority) -+#define LX_DXGETCONTEXTSCHEDULINGPRIORITY \ -+ _IOWR(0x47, 0x22, struct d3dkmt_getcontextschedulingpriority) - #define LX_DXLOCK2 \ - _IOWR(0x47, 0x25, struct d3dkmt_lock2) - #define LX_DXMARKDEVICEASERROR \ -@@ -1431,6 +1455,10 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x2c, struct d3dkmt_reclaimallocations2) - #define LX_DXSETALLOCATIONPRIORITY \ - _IOWR(0x47, 0x2e, struct d3dkmt_setallocationpriority) -+#define LX_DXSETCONTEXTINPROCESSSCHEDULINGPRIORITY \ -+ _IOWR(0x47, 0x2f, struct d3dkmt_setcontextinprocessschedulingpriority) -+#define LX_DXSETCONTEXTSCHEDULINGPRIORITY \ -+ _IOWR(0x47, 0x30, struct d3dkmt_setcontextschedulingpriority) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x31, struct d3dkmt_signalsynchronizationobjectfromcpu) - #define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1693-drivers-hv-dxgkrnl-Manage-residency-of-allocations.patch b/patch/kernel/archive/wsl2-arm64-6.6/1693-drivers-hv-dxgkrnl-Manage-residency-of-allocations.patch deleted file mode 100644 index f991dfd2bdca..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1693-drivers-hv-dxgkrnl-Manage-residency-of-allocations.patch +++ /dev/null @@ -1,447 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Fri, 14 Jan 2022 17:33:52 -0800 -Subject: drivers: hv: dxgkrnl: Manage residency of allocations - -Implement ioctls to manage residency of compute device allocations: - - LX_DXMAKERESIDENT, - - LX_DXEVICT. - -An allocation is "resident" when the compute devoce is setup to -access it. It means that the allocation is in the local device -memory or in non-pageable system memory. - -The current design does not support on demand compute device page -faulting. 
An allocation must be resident before the compute device -is allowed to access it. - -The LX_DXMAKERESIDENT ioctl instructs the video memory manager to -make the given allocations resident. The operation is submitted to -a paging queue (dxgpagingqueue). When the ioctl returns a "pending" -status, a monitored fence sync object can be used to synchronize -with the completion of the operation. - -The LX_DXEVICT ioctl istructs the video memory manager to evict -the given allocations from device accessible memory. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 4 + - drivers/hv/dxgkrnl/dxgvmbus.c | 98 +++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 27 ++ - drivers/hv/dxgkrnl/ioctl.c | 141 +++++++++- - include/uapi/misc/d3dkmthk.h | 54 ++++ - 5 files changed, 322 insertions(+), 2 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -810,6 +810,10 @@ int dxgvmb_send_create_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - int dxgvmb_send_destroy_allocation(struct dxgprocess *pr, struct dxgdevice *dev, - struct d3dkmt_destroyallocation2 *args, - struct d3dkmthandle *alloc_handles); -+int dxgvmb_send_make_resident(struct dxgprocess *pr, struct dxgadapter *adapter, -+ struct d3dddi_makeresident *args); -+int dxgvmb_send_evict(struct dxgprocess *pr, struct dxgadapter *adapter, -+ struct d3dkmt_evict *args); - int dxgvmb_send_submit_command(struct dxgprocess *pr, - struct dxgadapter *adapter, - struct d3dkmt_submitcommand *args); -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -2279,6 +2279,104 @@ int dxgvmb_send_get_stdalloc_data(struct dxgdevice *device, - return ret; - } - -+int 
dxgvmb_send_make_resident(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dddi_makeresident *args) -+{ -+ int ret; -+ u32 cmd_size; -+ struct dxgkvmb_command_makeresident_return result = { }; -+ struct dxgkvmb_command_makeresident *command = NULL; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ cmd_size = (args->alloc_count - 1) * sizeof(struct d3dkmthandle) + -+ sizeof(struct dxgkvmb_command_makeresident); -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ ret = copy_from_user(command->allocations, args->allocation_list, -+ args->alloc_count * -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_MAKERESIDENT, -+ process->host_handle); -+ command->alloc_count = args->alloc_count; -+ command->paging_queue = args->paging_queue; -+ command->flags = args->flags; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) { -+ DXG_ERR("send_make_resident failed %x", ret); -+ goto cleanup; -+ } -+ -+ args->paging_fence_value = result.paging_fence_value; -+ args->num_bytes_to_trim = result.num_bytes_to_trim; -+ ret = ntstatus2int(result.status); -+ -+cleanup: -+ -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_evict(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_evict *args) -+{ -+ int ret; -+ u32 cmd_size; -+ struct dxgkvmb_command_evict_return result = { }; -+ struct dxgkvmb_command_evict *command = NULL; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ cmd_size = (args->alloc_count - 1) * sizeof(struct d3dkmthandle) + -+ sizeof(struct dxgkvmb_command_evict); -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ ret = 
copy_from_user(command->allocations, args->allocations, -+ args->alloc_count * -+ sizeof(struct d3dkmthandle)); -+ if (ret) { -+ DXG_ERR("failed to copy alloc handles"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_EVICT, process->host_handle); -+ command->alloc_count = args->alloc_count; -+ command->device = args->device; -+ command->flags = args->flags; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ if (ret < 0) { -+ DXG_ERR("send_evict failed %x", ret); -+ goto cleanup; -+ } -+ args->num_bytes_to_trim = result.num_bytes_to_trim; -+ -+cleanup: -+ -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_submit_command(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_submitcommand *args) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -372,6 +372,33 @@ struct dxgkvmb_command_flushdevice { - enum dxgdevice_flushschedulerreason reason; - }; - -+struct dxgkvmb_command_makeresident { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle paging_queue; -+ struct d3dddi_makeresident_flags flags; -+ u32 alloc_count; -+ struct d3dkmthandle allocations[1]; -+}; -+ -+struct dxgkvmb_command_makeresident_return { -+ u64 paging_fence_value; -+ u64 num_bytes_to_trim; -+ struct ntstatus status; -+}; -+ -+struct dxgkvmb_command_evict { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dddi_evict_flags flags; -+ u32 alloc_count; -+ struct d3dkmthandle allocations[1]; -+}; -+ -+struct dxgkvmb_command_evict_return { -+ u64 num_bytes_to_trim; -+}; -+ - struct dxgkvmb_command_submitcommand { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_submitcommand args; -diff 
--git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -1961,6 +1961,143 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_make_resident(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret, ret2; -+ struct d3dddi_makeresident args; -+ struct d3dddi_makeresident *input = inargs; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.alloc_count > D3DKMT_MAKERESIDENT_ALLOC_MAX || -+ args.alloc_count == 0) { -+ DXG_ERR("invalid number of allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ if (args.paging_queue.v == 0) { -+ DXG_ERR("paging queue is missing"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.paging_queue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_make_resident(process, adapter, &args); -+ if (ret < 0) -+ goto cleanup; -+ /* STATUS_PENING is a success code > 0. 
It is returned to user mode */ -+ if (!(ret == STATUS_PENDING || ret == 0)) { -+ DXG_ERR("Unexpected error %x", ret); -+ goto cleanup; -+ } -+ -+ ret2 = copy_to_user(&input->paging_fence_value, -+ &args.paging_fence_value, sizeof(u64)); -+ if (ret2) { -+ DXG_ERR("failed to copy paging fence"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret2 = copy_to_user(&input->num_bytes_to_trim, -+ &args.num_bytes_to_trim, sizeof(u64)); -+ if (ret2) { -+ DXG_ERR("failed to copy bytes to trim"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ -+ return ret; -+} -+ -+static int -+dxgkio_evict(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_evict args; -+ struct d3dkmt_evict *input = inargs; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ if (args.alloc_count > D3DKMT_MAKERESIDENT_ALLOC_MAX || -+ args.alloc_count == 0) { -+ DXG_ERR("invalid number of allocations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_evict(process, adapter, &args); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(&input->num_bytes_to_trim, -+ &args.num_bytes_to_trim, sizeof(u64)); -+ if (ret) { -+ DXG_ERR("failed to copy bytes to trim to user"); -+ ret = -EINVAL; -+ } -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, 
dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static int - dxgkio_offer_allocations(struct dxgprocess *process, void *__user inargs) - { -@@ -4797,7 +4934,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x08 */ {}, - /* 0x09 */ {dxgkio_query_adapter_info, LX_DXQUERYADAPTERINFO}, - /* 0x0a */ {dxgkio_query_vidmem_info, LX_DXQUERYVIDEOMEMORYINFO}, --/* 0x0b */ {}, -+/* 0x0b */ {dxgkio_make_resident, LX_DXMAKERESIDENT}, - /* 0x0c */ {}, - /* 0x0d */ {dxgkio_escape, LX_DXESCAPE}, - /* 0x0e */ {dxgkio_get_device_state, LX_DXGETDEVICESTATE}, -@@ -4817,7 +4954,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x1b */ {dxgkio_destroy_hwqueue, LX_DXDESTROYHWQUEUE}, - /* 0x1c */ {dxgkio_destroy_paging_queue, LX_DXDESTROYPAGINGQUEUE}, - /* 0x1d */ {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT}, --/* 0x1e */ {}, -+/* 0x1e */ {dxgkio_evict, LX_DXEVICT}, - /* 0x1f */ {dxgkio_flush_heap_transitions, LX_DXFLUSHHEAPTRANSITIONS}, - /* 0x20 */ {}, - /* 0x21 */ {dxgkio_get_context_process_scheduling_priority, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -962,6 +962,56 @@ struct d3dkmt_destroyallocation2 { - struct d3dddicb_destroyallocation2flags flags; - }; - -+struct d3dddi_makeresident_flags { -+ union { -+ struct { -+ __u32 cant_trim_further:1; -+ __u32 must_succeed:1; -+ __u32 reserved:30; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dddi_makeresident { -+ struct d3dkmthandle paging_queue; -+ __u32 alloc_count; -+#ifdef __KERNEL__ -+ const struct d3dkmthandle *allocation_list; -+ const __u32 *priority_list; -+#else -+ __u64 allocation_list; -+ __u64 priority_list; -+#endif -+ struct d3dddi_makeresident_flags flags; -+ __u64 paging_fence_value; -+ __u64 num_bytes_to_trim; -+}; -+ -+struct d3dddi_evict_flags { -+ union { -+ struct { -+ __u32 evict_only_if_necessary:1; -+ 
__u32 not_written_to:1; -+ __u32 reserved:30; -+ }; -+ __u32 value; -+ }; -+}; -+ -+struct d3dkmt_evict { -+ struct d3dkmthandle device; -+ __u32 alloc_count; -+#ifdef __KERNEL__ -+ const struct d3dkmthandle *allocations; -+#else -+ __u64 allocations; -+#endif -+ struct d3dddi_evict_flags flags; -+ __u32 reserved; -+ __u64 num_bytes_to_trim; -+}; -+ - enum d3dkmt_memory_segment_group { - _D3DKMT_MEMORY_SEGMENT_GROUP_LOCAL = 0, - _D3DKMT_MEMORY_SEGMENT_GROUP_NON_LOCAL = 1 -@@ -1407,6 +1457,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) - #define LX_DXQUERYVIDEOMEMORYINFO \ - _IOWR(0x47, 0x0a, struct d3dkmt_queryvideomemoryinfo) -+#define LX_DXMAKERESIDENT \ -+ _IOWR(0x47, 0x0b, struct d3dddi_makeresident) - #define LX_DXESCAPE \ - _IOWR(0x47, 0x0d, struct d3dkmt_escape) - #define LX_DXGETDEVICESTATE \ -@@ -1437,6 +1489,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x19, struct d3dkmt_destroydevice) - #define LX_DXDESTROYSYNCHRONIZATIONOBJECT \ - _IOWR(0x47, 0x1d, struct d3dkmt_destroysynchronizationobject) -+#define LX_DXEVICT \ -+ _IOWR(0x47, 0x1e, struct d3dkmt_evict) - #define LX_DXFLUSHHEAPTRANSITIONS \ - _IOWR(0x47, 0x1f, struct d3dkmt_flushheaptransitions) - #define LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1694-drivers-hv-dxgkrnl-Manage-compute-device-virtual-addresses.patch b/patch/kernel/archive/wsl2-arm64-6.6/1694-drivers-hv-dxgkrnl-Manage-compute-device-virtual-addresses.patch deleted file mode 100644 index 66ab6b6a7527..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1694-drivers-hv-dxgkrnl-Manage-compute-device-virtual-addresses.patch +++ /dev/null @@ -1,703 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Fri, 14 Jan 2022 17:13:04 -0800 -Subject: drivers: hv: dxgkrnl: Manage compute device virtual addresses - -Implement ioctls to manage compute device virtual 
addresses (VA): - - LX_DXRESERVEGPUVIRTUALADDRESS, - - LX_DXFREEGPUVIRTUALADDRESS, - - LX_DXMAPGPUVIRTUALADDRESS, - - LX_DXUPDATEGPUVIRTUALADDRESS. - -Compute devices access memory by using virtual addressses. -Each process has a dedicated VA space. The video memory manager -on the host is responsible with updating device page tables -before submitting a DMA buffer for execution. - -The LX_DXRESERVEGPUVIRTUALADDRESS ioctl reserves a portion of the -process compute device VA space. - -The LX_DXMAPGPUVIRTUALADDRESS ioctl reserves a portion of the process -compute device VA space and maps it to the given compute device -allocation. - -The LX_DXFREEGPUVIRTUALADDRESS frees the previously reserved portion -of the compute device VA space. - -The LX_DXUPDATEGPUVIRTUALADDRESS ioctl adds operations to modify the -compute device VA space to a compute device execution context. It -allows the operations to be queued and synchronized with execution -of other compute device DMA buffers.. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 10 + - drivers/hv/dxgkrnl/dxgvmbus.c | 150 ++++++ - drivers/hv/dxgkrnl/dxgvmbus.h | 38 ++ - drivers/hv/dxgkrnl/ioctl.c | 228 +++++++++- - include/uapi/misc/d3dkmthk.h | 126 +++++ - 5 files changed, 548 insertions(+), 4 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -817,6 +817,16 @@ int dxgvmb_send_evict(struct dxgprocess *pr, struct dxgadapter *adapter, - int dxgvmb_send_submit_command(struct dxgprocess *pr, - struct dxgadapter *adapter, - struct d3dkmt_submitcommand *args); -+int dxgvmb_send_map_gpu_va(struct dxgprocess *pr, struct d3dkmthandle h, -+ struct dxgadapter *adapter, -+ struct d3dddi_mapgpuvirtualaddress *args); -+int dxgvmb_send_reserve_gpu_va(struct dxgprocess *pr, -+ struct dxgadapter *adapter, -+ 
struct d3dddi_reservegpuvirtualaddress *args); -+int dxgvmb_send_free_gpu_va(struct dxgprocess *pr, struct dxgadapter *adapter, -+ struct d3dkmt_freegpuvirtualaddress *args); -+int dxgvmb_send_update_gpu_va(struct dxgprocess *pr, struct dxgadapter *adapter, -+ struct d3dkmt_updategpuvirtualaddress *args); - int dxgvmb_send_create_sync_object(struct dxgprocess *pr, - struct dxgadapter *adapter, - struct d3dkmt_createsynchronizationobject2 -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -2432,6 +2432,156 @@ int dxgvmb_send_submit_command(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_map_gpu_va(struct dxgprocess *process, -+ struct d3dkmthandle device, -+ struct dxgadapter *adapter, -+ struct d3dddi_mapgpuvirtualaddress *args) -+{ -+ struct dxgkvmb_command_mapgpuvirtualaddress *command; -+ struct dxgkvmb_command_mapgpuvirtualaddress_return result; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_MAPGPUVIRTUALADDRESS, -+ process->host_handle); -+ command->args = *args; -+ command->device = device; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result, -+ sizeof(result)); -+ if (ret < 0) -+ goto cleanup; -+ args->virtual_address = result.virtual_address; -+ args->paging_fence_value = result.paging_fence_value; -+ ret = ntstatus2int(result.status); -+ -+cleanup: -+ -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_reserve_gpu_va(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dddi_reservegpuvirtualaddress *args) -+{ -+ struct dxgkvmb_command_reservegpuvirtualaddress *command; -+ struct 
dxgkvmb_command_reservegpuvirtualaddress_return result; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_RESERVEGPUVIRTUALADDRESS, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size, &result, -+ sizeof(result)); -+ args->virtual_address = result.virtual_address; -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_free_gpu_va(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_freegpuvirtualaddress *args) -+{ -+ struct dxgkvmb_command_freegpuvirtualaddress *command; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ ret = init_message(&msg, adapter, process, sizeof(*command)); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_FREEGPUVIRTUALADDRESS, -+ process->host_handle); -+ command->args = *args; -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ -+int dxgvmb_send_update_gpu_va(struct dxgprocess *process, -+ struct dxgadapter *adapter, -+ struct d3dkmt_updategpuvirtualaddress *args) -+{ -+ struct dxgkvmb_command_updategpuvirtualaddress *command; -+ u32 cmd_size; -+ u32 op_size; -+ int ret; -+ struct dxgvmbusmsg msg = {.hdr = NULL}; -+ -+ if (args->num_operations == 0 || -+ (DXG_MAX_VM_BUS_PACKET_SIZE / -+ sizeof(struct d3dddi_updategpuvirtualaddress_operation)) < -+ args->num_operations) { -+ ret = -EINVAL; -+ DXG_ERR("Invalid number of operations: %d", -+ args->num_operations); -+ goto cleanup; -+ } -+ -+ op_size = args->num_operations * -+ sizeof(struct 
d3dddi_updategpuvirtualaddress_operation); -+ cmd_size = sizeof(struct dxgkvmb_command_updategpuvirtualaddress) + -+ op_size - sizeof(args->operations[0]); -+ -+ ret = init_message(&msg, adapter, process, cmd_size); -+ if (ret) -+ goto cleanup; -+ command = (void *)msg.msg; -+ -+ command_vgpu_to_host_init2(&command->hdr, -+ DXGK_VMBCOMMAND_UPDATEGPUVIRTUALADDRESS, -+ process->host_handle); -+ command->fence_value = args->fence_value; -+ command->device = args->device; -+ command->context = args->context; -+ command->fence_object = args->fence_object; -+ command->num_operations = args->num_operations; -+ command->flags = args->flags.value; -+ ret = copy_from_user(command->operations, args->operations, -+ op_size); -+ if (ret) { -+ DXG_ERR("failed to copy operations"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - static void set_result(struct d3dkmt_createsynchronizationobject2 *args, - u64 fence_gpu_va, u8 *va) - { -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -418,6 +418,44 @@ struct dxgkvmb_command_flushheaptransitions { - struct dxgkvmb_command_vgpu_to_host hdr; - }; - -+struct dxgkvmb_command_freegpuvirtualaddress { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmt_freegpuvirtualaddress args; -+}; -+ -+struct dxgkvmb_command_mapgpuvirtualaddress { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dddi_mapgpuvirtualaddress args; -+ struct d3dkmthandle device; -+}; -+ -+struct dxgkvmb_command_mapgpuvirtualaddress_return { -+ u64 virtual_address; -+ u64 paging_fence_value; -+ struct ntstatus status; -+}; -+ -+struct dxgkvmb_command_reservegpuvirtualaddress { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct 
d3dddi_reservegpuvirtualaddress args; -+}; -+ -+struct dxgkvmb_command_reservegpuvirtualaddress_return { -+ u64 virtual_address; -+ u64 paging_fence_value; -+}; -+ -+struct dxgkvmb_command_updategpuvirtualaddress { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ u64 fence_value; -+ struct d3dkmthandle device; -+ struct d3dkmthandle context; -+ struct d3dkmthandle fence_object; -+ u32 num_operations; -+ u32 flags; -+ struct d3dddi_updategpuvirtualaddress_operation operations[1]; -+}; -+ - struct dxgkvmb_command_queryclockcalibration { - struct dxgkvmb_command_vgpu_to_host hdr; - struct d3dkmt_queryclockcalibration args; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -2492,6 +2492,226 @@ dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+static int -+dxgkio_map_gpu_va(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret, ret2; -+ struct d3dddi_mapgpuvirtualaddress args; -+ struct d3dddi_mapgpuvirtualaddress *input = inargs; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.paging_queue); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_map_gpu_va(process, zerohandle, adapter, &args); -+ if (ret < 0) -+ goto cleanup; -+ /* STATUS_PENING is a success code > 0. 
It is returned to user mode */ -+ if (!(ret == STATUS_PENDING || ret == 0)) { -+ DXG_ERR("Unexpected error %x", ret); -+ goto cleanup; -+ } -+ -+ ret2 = copy_to_user(&input->paging_fence_value, -+ &args.paging_fence_value, sizeof(u64)); -+ if (ret2) { -+ DXG_ERR("failed to copy paging fence to user"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret2 = copy_to_user(&input->virtual_address, &args.virtual_address, -+ sizeof(args.virtual_address)); -+ if (ret2) { -+ DXG_ERR("failed to copy va to user"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_reserve_gpu_va(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dddi_reservegpuvirtualaddress args; -+ struct d3dddi_reservegpuvirtualaddress *input = inargs; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGPAGINGQUEUE, -+ args.adapter); -+ if (device == NULL) { -+ DXG_ERR("invalid adapter or paging queue: 0x%x", -+ args.adapter.v); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ adapter = device->adapter; -+ kref_get(&adapter->adapter_kref); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } else { -+ args.adapter = adapter->host_handle; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ ret = dxgvmb_send_reserve_gpu_va(process, adapter, &args); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = 
copy_to_user(&input->virtual_address, &args.virtual_address, -+ sizeof(args.virtual_address)); -+ if (ret) { -+ DXG_ERR("failed to copy VA to user"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (adapter) { -+ dxgadapter_release_lock_shared(adapter); -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static int -+dxgkio_free_gpu_va(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_freegpuvirtualaddress args; -+ struct dxgadapter *adapter = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = dxgprocess_adapter_by_handle(process, args.adapter); -+ if (adapter == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ args.adapter = adapter->host_handle; -+ ret = dxgvmb_send_free_gpu_va(process, adapter, &args); -+ -+cleanup: -+ -+ if (adapter) { -+ dxgadapter_release_lock_shared(adapter); -+ kref_put(&adapter->adapter_kref, dxgadapter_release); -+ } -+ -+ return ret; -+} -+ -+static int -+dxgkio_update_gpu_va(struct dxgprocess *process, void *__user inargs) -+{ -+ int ret; -+ struct d3dkmt_updategpuvirtualaddress args; -+ struct d3dkmt_updategpuvirtualaddress *input = inargs; -+ struct dxgadapter *adapter = NULL; -+ struct dxgdevice *device = NULL; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ adapter = NULL; -+ goto cleanup; -+ } 
-+ -+ ret = dxgvmb_send_update_gpu_va(process, adapter, &args); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = copy_to_user(&input->fence_value, &args.fence_value, -+ sizeof(args.fence_value)); -+ if (ret) { -+ DXG_ERR("failed to copy fence value to user"); -+ ret = -EINVAL; -+ } -+ -+cleanup: -+ -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ kref_put(&device->device_kref, dxgdevice_release); -+ -+ return ret; -+} -+ - static int - dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - { -@@ -4931,11 +5151,11 @@ static struct ioctl_desc ioctls[] = { - /* 0x05 */ {dxgkio_destroy_context, LX_DXDESTROYCONTEXT}, - /* 0x06 */ {dxgkio_create_allocation, LX_DXCREATEALLOCATION}, - /* 0x07 */ {dxgkio_create_paging_queue, LX_DXCREATEPAGINGQUEUE}, --/* 0x08 */ {}, -+/* 0x08 */ {dxgkio_reserve_gpu_va, LX_DXRESERVEGPUVIRTUALADDRESS}, - /* 0x09 */ {dxgkio_query_adapter_info, LX_DXQUERYADAPTERINFO}, - /* 0x0a */ {dxgkio_query_vidmem_info, LX_DXQUERYVIDEOMEMORYINFO}, - /* 0x0b */ {dxgkio_make_resident, LX_DXMAKERESIDENT}, --/* 0x0c */ {}, -+/* 0x0c */ {dxgkio_map_gpu_va, LX_DXMAPGPUVIRTUALADDRESS}, - /* 0x0d */ {dxgkio_escape, LX_DXESCAPE}, - /* 0x0e */ {dxgkio_get_device_state, LX_DXGETDEVICESTATE}, - /* 0x0f */ {dxgkio_submit_command, LX_DXSUBMITCOMMAND}, -@@ -4956,7 +5176,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x1d */ {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT}, - /* 0x1e */ {dxgkio_evict, LX_DXEVICT}, - /* 0x1f */ {dxgkio_flush_heap_transitions, LX_DXFLUSHHEAPTRANSITIONS}, --/* 0x20 */ {}, -+/* 0x20 */ {dxgkio_free_gpu_va, LX_DXFREEGPUVIRTUALADDRESS}, - /* 0x21 */ {dxgkio_get_context_process_scheduling_priority, - LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY}, - /* 0x22 */ {dxgkio_get_context_scheduling_priority, -@@ -4990,7 +5210,7 @@ static struct ioctl_desc ioctls[] = { - LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE}, - /* 0x37 */ {dxgkio_unlock2, LX_DXUNLOCK2}, - /* 0x38 */ 
{dxgkio_update_alloc_property, LX_DXUPDATEALLOCPROPERTY}, --/* 0x39 */ {}, -+/* 0x39 */ {dxgkio_update_gpu_va, LX_DXUPDATEGPUVIRTUALADDRESS}, - /* 0x3a */ {dxgkio_wait_sync_object_cpu, - LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU}, - /* 0x3b */ {dxgkio_wait_sync_object_gpu, -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -1012,6 +1012,124 @@ struct d3dkmt_evict { - __u64 num_bytes_to_trim; - }; - -+struct d3dddigpuva_protection_type { -+ union { -+ struct { -+ __u64 write:1; -+ __u64 execute:1; -+ __u64 zero:1; -+ __u64 no_access:1; -+ __u64 system_use_only:1; -+ __u64 reserved:59; -+ }; -+ __u64 value; -+ }; -+}; -+ -+enum d3dddi_updategpuvirtualaddress_operation_type { -+ _D3DDDI_UPDATEGPUVIRTUALADDRESS_MAP = 0, -+ _D3DDDI_UPDATEGPUVIRTUALADDRESS_UNMAP = 1, -+ _D3DDDI_UPDATEGPUVIRTUALADDRESS_COPY = 2, -+ _D3DDDI_UPDATEGPUVIRTUALADDRESS_MAP_PROTECT = 3, -+}; -+ -+struct d3dddi_updategpuvirtualaddress_operation { -+ enum d3dddi_updategpuvirtualaddress_operation_type operation; -+ union { -+ struct { -+ __u64 base_address; -+ __u64 size; -+ struct d3dkmthandle allocation; -+ __u64 allocation_offset; -+ __u64 allocation_size; -+ } map; -+ struct { -+ __u64 base_address; -+ __u64 size; -+ struct d3dkmthandle allocation; -+ __u64 allocation_offset; -+ __u64 allocation_size; -+ struct d3dddigpuva_protection_type protection; -+ __u64 driver_protection; -+ } map_protect; -+ struct { -+ __u64 base_address; -+ __u64 size; -+ struct d3dddigpuva_protection_type protection; -+ } unmap; -+ struct { -+ __u64 source_address; -+ __u64 size; -+ __u64 dest_address; -+ } copy; -+ }; -+}; -+ -+enum d3dddigpuva_reservation_type { -+ _D3DDDIGPUVA_RESERVE_NO_ACCESS = 0, -+ _D3DDDIGPUVA_RESERVE_ZERO = 1, -+ _D3DDDIGPUVA_RESERVE_NO_COMMIT = 2 -+}; -+ -+struct d3dkmt_updategpuvirtualaddress { -+ struct d3dkmthandle device; -+ struct d3dkmthandle 
context; -+ struct d3dkmthandle fence_object; -+ __u32 num_operations; -+#ifdef __KERNEL__ -+ struct d3dddi_updategpuvirtualaddress_operation *operations; -+#else -+ __u64 operations; -+#endif -+ __u32 reserved0; -+ __u32 reserved1; -+ __u64 reserved2; -+ __u64 fence_value; -+ union { -+ struct { -+ __u32 do_not_wait:1; -+ __u32 reserved:31; -+ }; -+ __u32 value; -+ } flags; -+ __u32 reserved3; -+}; -+ -+struct d3dddi_mapgpuvirtualaddress { -+ struct d3dkmthandle paging_queue; -+ __u64 base_address; -+ __u64 minimum_address; -+ __u64 maximum_address; -+ struct d3dkmthandle allocation; -+ __u64 offset_in_pages; -+ __u64 size_in_pages; -+ struct d3dddigpuva_protection_type protection; -+ __u64 driver_protection; -+ __u32 reserved0; -+ __u64 reserved1; -+ __u64 virtual_address; -+ __u64 paging_fence_value; -+}; -+ -+struct d3dddi_reservegpuvirtualaddress { -+ struct d3dkmthandle adapter; -+ __u64 base_address; -+ __u64 minimum_address; -+ __u64 maximum_address; -+ __u64 size; -+ enum d3dddigpuva_reservation_type reservation_type; -+ __u64 driver_protection; -+ __u64 virtual_address; -+ __u64 paging_fence_value; -+}; -+ -+struct d3dkmt_freegpuvirtualaddress { -+ struct d3dkmthandle adapter; -+ __u32 reserved; -+ __u64 base_address; -+ __u64 size; -+}; -+ - enum d3dkmt_memory_segment_group { - _D3DKMT_MEMORY_SEGMENT_GROUP_LOCAL = 0, - _D3DKMT_MEMORY_SEGMENT_GROUP_NON_LOCAL = 1 -@@ -1453,12 +1571,16 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x06, struct d3dkmt_createallocation) - #define LX_DXCREATEPAGINGQUEUE \ - _IOWR(0x47, 0x07, struct d3dkmt_createpagingqueue) -+#define LX_DXRESERVEGPUVIRTUALADDRESS \ -+ _IOWR(0x47, 0x08, struct d3dddi_reservegpuvirtualaddress) - #define LX_DXQUERYADAPTERINFO \ - _IOWR(0x47, 0x09, struct d3dkmt_queryadapterinfo) - #define LX_DXQUERYVIDEOMEMORYINFO \ - _IOWR(0x47, 0x0a, struct d3dkmt_queryvideomemoryinfo) - #define LX_DXMAKERESIDENT \ - _IOWR(0x47, 0x0b, struct d3dddi_makeresident) -+#define LX_DXMAPGPUVIRTUALADDRESS \ -+ 
_IOWR(0x47, 0x0c, struct d3dddi_mapgpuvirtualaddress) - #define LX_DXESCAPE \ - _IOWR(0x47, 0x0d, struct d3dkmt_escape) - #define LX_DXGETDEVICESTATE \ -@@ -1493,6 +1615,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x1e, struct d3dkmt_evict) - #define LX_DXFLUSHHEAPTRANSITIONS \ - _IOWR(0x47, 0x1f, struct d3dkmt_flushheaptransitions) -+#define LX_DXFREEGPUVIRTUALADDRESS \ -+ _IOWR(0x47, 0x20, struct d3dkmt_freegpuvirtualaddress) - #define LX_DXGETCONTEXTINPROCESSSCHEDULINGPRIORITY \ - _IOWR(0x47, 0x21, struct d3dkmt_getcontextinprocessschedulingpriority) - #define LX_DXGETCONTEXTSCHEDULINGPRIORITY \ -@@ -1529,6 +1653,8 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x37, struct d3dkmt_unlock2) - #define LX_DXUPDATEALLOCPROPERTY \ - _IOWR(0x47, 0x38, struct d3dddi_updateallocproperty) -+#define LX_DXUPDATEGPUVIRTUALADDRESS \ -+ _IOWR(0x47, 0x39, struct d3dkmt_updategpuvirtualaddress) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \ - _IOWR(0x47, 0x3a, struct d3dkmt_waitforsynchronizationobjectfromcpu) - #define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1695-drivers-hv-dxgkrnl-Add-support-to-map-guest-pages-by-host.patch b/patch/kernel/archive/wsl2-arm64-6.6/1695-drivers-hv-dxgkrnl-Add-support-to-map-guest-pages-by-host.patch deleted file mode 100644 index 767630abc3c9..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1695-drivers-hv-dxgkrnl-Add-support-to-map-guest-pages-by-host.patch +++ /dev/null @@ -1,313 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Fri, 8 Oct 2021 14:17:39 -0700 -Subject: drivers: hv: dxgkrnl: Add support to map guest pages by host - -Implement support for mapping guest memory pages by the host. -This removes hyper-v limitations of using GPADL (guest physical -address list). - -Dxgkrnl uses hyper-v GPADLs to share guest system memory with the -host. 
This method has limitations: -- a single GPADL can represent only ~32MB of memory -- there is a limit of how much memory the total size of GPADLs - in a VM can represent. -To avoid these limitations the host implemented mapping guest memory -pages. Presence of this support is determined by reading PCI config -space. When the support is enabled, dxgkrnl does not use GPADLs and -instead uses the following code flow: -- memory pages of an existing system memory buffer are pinned -- PFNs of the pages are sent to the host via a VM bus message -- the host maps the PFNs to get access to the memory - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/Makefile | 2 +- - drivers/hv/dxgkrnl/dxgkrnl.h | 1 + - drivers/hv/dxgkrnl/dxgmodule.c | 33 ++- - drivers/hv/dxgkrnl/dxgvmbus.c | 117 +++++++--- - drivers/hv/dxgkrnl/dxgvmbus.h | 10 + - drivers/hv/dxgkrnl/misc.c | 1 + - 6 files changed, 129 insertions(+), 35 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/Makefile b/drivers/hv/dxgkrnl/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/Makefile -+++ b/drivers/hv/dxgkrnl/Makefile -@@ -2,4 +2,4 @@ - # Makefile for the hyper-v compute device driver (dxgkrnl). 
- - obj-$(CONFIG_DXGKRNL) += dxgkrnl.o --dxgkrnl-y := dxgmodule.o hmgr.o misc.o dxgadapter.o ioctl.o dxgvmbus.o dxgprocess.o -+dxgkrnl-y := dxgmodule.o hmgr.o misc.o dxgadapter.o ioctl.o dxgvmbus.o dxgprocess.o -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -316,6 +316,7 @@ struct dxgglobal { - bool misc_registered; - bool pci_registered; - bool vmbus_registered; -+ bool map_guest_pages_enabled; - }; - - static inline struct dxgglobal *dxggbl(void) -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -147,7 +147,7 @@ void dxgglobal_remove_host_event(struct dxghostevent *event) - - void signal_host_cpu_event(struct dxghostevent *eventhdr) - { -- struct dxghosteventcpu *event = (struct dxghosteventcpu *)eventhdr; -+ struct dxghosteventcpu *event = (struct dxghosteventcpu *)eventhdr; - - if (event->remove_from_list || - event->destroy_after_signal) { -@@ -426,7 +426,11 @@ const struct file_operations dxgk_fops = { - #define DXGK_VMBUS_VGPU_LUID_OFFSET (DXGK_VMBUS_VERSION_OFFSET + \ - sizeof(u32)) - --/* The guest writes its capabilities to this address */ -+/* The host caps (dxgk_vmbus_hostcaps) */ -+#define DXGK_VMBUS_HOSTCAPS_OFFSET (DXGK_VMBUS_VGPU_LUID_OFFSET + \ -+ sizeof(struct winluid)) -+ -+/* The guest writes its capavilities to this adderss */ - #define DXGK_VMBUS_GUESTCAPS_OFFSET (DXGK_VMBUS_VERSION_OFFSET + \ - sizeof(u32)) - -@@ -441,6 +445,23 @@ struct dxgk_vmbus_guestcaps { - }; - }; - -+/* -+ * The structure defines features, supported by the host. -+ * -+ * map_guest_memory -+ * Host can map guest memory pages, so the guest can avoid using GPADLs -+ * to represent existing system memory allocations. 
-+ */ -+struct dxgk_vmbus_hostcaps { -+ union { -+ struct { -+ u32 map_guest_memory : 1; -+ u32 reserved : 31; -+ }; -+ u32 host_caps; -+ }; -+}; -+ - /* - * A helper function to read PCI config space. - */ -@@ -475,6 +496,7 @@ static int dxg_pci_probe_device(struct pci_dev *dev, - struct winluid vgpu_luid = {}; - struct dxgk_vmbus_guestcaps guest_caps = {.wsl2 = 1}; - struct dxgglobal *dxgglobal = dxggbl(); -+ struct dxgk_vmbus_hostcaps host_caps = {}; - - mutex_lock(&dxgglobal->device_mutex); - -@@ -503,6 +525,13 @@ static int dxg_pci_probe_device(struct pci_dev *dev, - if (ret) - goto cleanup; - -+ ret = pci_read_config_dword(dev, DXGK_VMBUS_HOSTCAPS_OFFSET, -+ &host_caps.host_caps); -+ if (ret == 0) { -+ if (host_caps.map_guest_memory) -+ dxgglobal->map_guest_pages_enabled = true; -+ } -+ - if (dxgglobal->vmbus_ver > DXGK_VMBUS_INTERFACE_VERSION) - dxgglobal->vmbus_ver = DXGK_VMBUS_INTERFACE_VERSION; - } -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1383,15 +1383,19 @@ int create_existing_sysmem(struct dxgdevice *device, - void *kmem = NULL; - int ret = 0; - struct dxgkvmb_command_setexistingsysmemstore *set_store_command; -+ struct dxgkvmb_command_setexistingsysmempages *set_pages_command; - u64 alloc_size = host_alloc->allocation_size; - u32 npages = alloc_size >> PAGE_SHIFT; - struct dxgvmbusmsg msg = {.hdr = NULL}; -- -- ret = init_message(&msg, device->adapter, device->process, -- sizeof(*set_store_command)); -- if (ret) -- goto cleanup; -- set_store_command = (void *)msg.msg; -+ const u32 max_pfns_in_message = -+ (DXG_MAX_VM_BUS_PACKET_SIZE - sizeof(*set_pages_command) - -+ PAGE_SIZE) / sizeof(__u64); -+ u32 alloc_offset_in_pages = 0; -+ struct page **page_in; -+ u64 *pfn; -+ u32 pages_to_send; -+ u32 i; -+ struct dxgglobal *dxgglobal = dxggbl(); - - /* - * Create a guest physical address list and set it as 
the allocation -@@ -1402,6 +1406,7 @@ int create_existing_sysmem(struct dxgdevice *device, - DXG_TRACE("Alloc size: %lld", alloc_size); - - dxgalloc->cpu_address = (void *)sysmem; -+ - dxgalloc->pages = vzalloc(npages * sizeof(void *)); - if (dxgalloc->pages == NULL) { - DXG_ERR("failed to allocate pages"); -@@ -1419,39 +1424,87 @@ int create_existing_sysmem(struct dxgdevice *device, - ret = -ENOMEM; - goto cleanup; - } -- kmem = vmap(dxgalloc->pages, npages, VM_MAP, PAGE_KERNEL); -- if (kmem == NULL) { -- DXG_ERR("vmap failed"); -- ret = -ENOMEM; -- goto cleanup; -- } -- ret1 = vmbus_establish_gpadl(dxgglobal_get_vmbus(), kmem, -- alloc_size, &dxgalloc->gpadl); -- if (ret1) { -- DXG_ERR("establish_gpadl failed: %d", ret1); -- ret = -ENOMEM; -- goto cleanup; -- } -+ if (!dxgglobal->map_guest_pages_enabled) { -+ ret = init_message(&msg, device->adapter, device->process, -+ sizeof(*set_store_command)); -+ if (ret) -+ goto cleanup; -+ set_store_command = (void *)msg.msg; -+ -+ kmem = vmap(dxgalloc->pages, npages, VM_MAP, PAGE_KERNEL); -+ if (kmem == NULL) { -+ DXG_ERR("vmap failed"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ ret1 = vmbus_establish_gpadl(dxgglobal_get_vmbus(), kmem, -+ alloc_size, &dxgalloc->gpadl); -+ if (ret1) { -+ DXG_ERR("establish_gpadl failed: %d", ret1); -+ ret = -ENOMEM; -+ goto cleanup; -+ } - #ifdef _MAIN_KERNEL_ -- DXG_TRACE("New gpadl %d", dxgalloc->gpadl.gpadl_handle); -+ DXG_TRACE("New gpadl %d", dxgalloc->gpadl.gpadl_handle); - #else -- DXG_TRACE("New gpadl %d", dxgalloc->gpadl); -+ DXG_TRACE("New gpadl %d", dxgalloc->gpadl); - #endif - -- command_vgpu_to_host_init2(&set_store_command->hdr, -- DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE, -- device->process->host_handle); -- set_store_command->device = device->handle; -- set_store_command->device = device->handle; -- set_store_command->allocation = host_alloc->allocation; -+ command_vgpu_to_host_init2(&set_store_command->hdr, -+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE, -+ 
device->process->host_handle); -+ set_store_command->device = device->handle; -+ set_store_command->allocation = host_alloc->allocation; - #ifdef _MAIN_KERNEL_ -- set_store_command->gpadl = dxgalloc->gpadl.gpadl_handle; -+ set_store_command->gpadl = dxgalloc->gpadl.gpadl_handle; - #else -- set_store_command->gpadl = dxgalloc->gpadl; -+ set_store_command->gpadl = dxgalloc->gpadl; - #endif -- ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -- if (ret < 0) -- DXG_ERR("failed to set existing store: %x", ret); -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, -+ msg.size); -+ if (ret < 0) -+ DXG_ERR("failed set existing store: %x", ret); -+ } else { -+ /* -+ * Send the list of the allocation PFNs to the host. The host -+ * will map the pages for GPU access. -+ */ -+ -+ ret = init_message(&msg, device->adapter, device->process, -+ sizeof(*set_pages_command) + -+ max_pfns_in_message * sizeof(u64)); -+ if (ret) -+ goto cleanup; -+ set_pages_command = (void *)msg.msg; -+ command_vgpu_to_host_init2(&set_pages_command->hdr, -+ DXGK_VMBCOMMAND_SETEXISTINGSYSMEMPAGES, -+ device->process->host_handle); -+ set_pages_command->device = device->handle; -+ set_pages_command->allocation = host_alloc->allocation; -+ -+ page_in = dxgalloc->pages; -+ while (alloc_offset_in_pages < npages) { -+ pfn = (u64 *)((char *)msg.msg + -+ sizeof(*set_pages_command)); -+ pages_to_send = min(npages - alloc_offset_in_pages, -+ max_pfns_in_message); -+ set_pages_command->num_pages = pages_to_send; -+ set_pages_command->alloc_offset_in_pages = -+ alloc_offset_in_pages; -+ -+ for (i = 0; i < pages_to_send; i++) -+ *pfn++ = page_to_pfn(*page_in++); -+ -+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, -+ msg.hdr, -+ msg.size); -+ if (ret < 0) { -+ DXG_ERR("failed set existing pages: %x", ret); -+ break; -+ } -+ alloc_offset_in_pages += pages_to_send; -+ } -+ } - - cleanup: - if (kmem) -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h -index 
111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.h -+++ b/drivers/hv/dxgkrnl/dxgvmbus.h -@@ -234,6 +234,16 @@ struct dxgkvmb_command_setexistingsysmemstore { - u32 gpadl; - }; - -+/* Returns ntstatus */ -+struct dxgkvmb_command_setexistingsysmempages { -+ struct dxgkvmb_command_vgpu_to_host hdr; -+ struct d3dkmthandle device; -+ struct d3dkmthandle allocation; -+ u32 num_pages; -+ u32 alloc_offset_in_pages; -+ /* u64 pfn_array[num_pages] */ -+}; -+ - struct dxgkvmb_command_createprocess { - struct dxgkvmb_command_vm_to_host hdr; - void *process; -diff --git a/drivers/hv/dxgkrnl/misc.c b/drivers/hv/dxgkrnl/misc.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.c -+++ b/drivers/hv/dxgkrnl/misc.c -@@ -35,3 +35,4 @@ u16 *wcsncpy(u16 *dest, const u16 *src, size_t n) - dest[i - 1] = 0; - return dest; - } -+ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1696-drivers-hv-dxgkrnl-Removed-struct-vmbus_gpadl-which-was-defined-in-the-main-linux-branch.patch b/patch/kernel/archive/wsl2-arm64-6.6/1696-drivers-hv-dxgkrnl-Removed-struct-vmbus_gpadl-which-was-defined-in-the-main-linux-branch.patch deleted file mode 100644 index f434a553a124..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1696-drivers-hv-dxgkrnl-Removed-struct-vmbus_gpadl-which-was-defined-in-the-main-linux-branch.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Mon, 21 Mar 2022 20:32:44 -0700 -Subject: drivers: hv: dxgkrnl: Removed struct vmbus_gpadl, which was defined - in the main linux branch - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ 
b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -932,7 +932,7 @@ void dxgallocation_destroy(struct dxgallocation *alloc) - vmbus_teardown_gpadl(dxgglobal_get_vmbus(), &alloc->gpadl); - alloc->gpadl.gpadl_handle = 0; - } --else -+#else - if (alloc->gpadl) { - DXG_TRACE("Teardown gpadl %d", alloc->gpadl); - vmbus_teardown_gpadl(dxgglobal_get_vmbus(), alloc->gpadl); --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1697-drivers-hv-dxgkrnl-Remove-dxgk_init_ioctls.patch b/patch/kernel/archive/wsl2-arm64-6.6/1697-drivers-hv-dxgkrnl-Remove-dxgk_init_ioctls.patch deleted file mode 100644 index a36ee3dedcf0..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1697-drivers-hv-dxgkrnl-Remove-dxgk_init_ioctls.patch +++ /dev/null @@ -1,100 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 22 Mar 2022 10:32:54 -0700 -Subject: drivers: hv: dxgkrnl: Remove dxgk_init_ioctls - -The array of ioctls is initialized statically to remove the unnecessary -function. - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgmodule.c | 2 +- - drivers/hv/dxgkrnl/ioctl.c | 15 +++++----- - 2 files changed, 8 insertions(+), 9 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -300,7 +300,7 @@ static void dxgglobal_start_adapters(void) - } - - /* -- * Stopsthe active dxgadapter objects. -+ * Stop the active dxgadapter objects. 
- */ - static void dxgglobal_stop_adapters(void) - { -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -26,7 +26,6 @@ - struct ioctl_desc { - int (*ioctl_callback)(struct dxgprocess *p, void __user *arg); - u32 ioctl; -- u32 arg_size; - }; - - #ifdef DEBUG -@@ -91,7 +90,7 @@ static const struct file_operations dxg_resource_fops = { - }; - - static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, -- void *__user inargs) -+ void *__user inargs) - { - struct d3dkmt_openadapterfromluid args; - int ret; -@@ -1002,7 +1001,7 @@ dxgkio_create_hwqueue(struct dxgprocess *process, void *__user inargs) - } - - static int dxgkio_destroy_hwqueue(struct dxgprocess *process, -- void *__user inargs) -+ void *__user inargs) - { - struct d3dkmt_destroyhwqueue args; - int ret; -@@ -2280,7 +2279,8 @@ dxgkio_submit_command(struct dxgprocess *process, void *__user inargs) - } - - static int --dxgkio_submit_command_to_hwqueue(struct dxgprocess *process, void *__user inargs) -+dxgkio_submit_command_to_hwqueue(struct dxgprocess *process, -+ void *__user inargs) - { - int ret; - struct d3dkmt_submitcommandtohwqueue args; -@@ -5087,8 +5087,7 @@ open_resource(struct dxgprocess *process, - } - - static int --dxgkio_open_resource_nt(struct dxgprocess *process, -- void *__user inargs) -+dxgkio_open_resource_nt(struct dxgprocess *process, void *__user inargs) - { - struct d3dkmt_openresourcefromnthandle args; - struct d3dkmt_openresourcefromnthandle *__user args_user = inargs; -@@ -5166,7 +5165,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x14 */ {dxgkio_enum_adapters, LX_DXENUMADAPTERS2}, - /* 0x15 */ {dxgkio_close_adapter, LX_DXCLOSEADAPTER}, - /* 0x16 */ {dxgkio_change_vidmem_reservation, -- LX_DXCHANGEVIDEOMEMORYRESERVATION}, -+ LX_DXCHANGEVIDEOMEMORYRESERVATION}, - /* 0x17 */ {}, - /* 0x18 */ {dxgkio_create_hwqueue, LX_DXCREATEHWQUEUE}, - /* 0x19 
*/ {dxgkio_destroy_device, LX_DXDESTROYDEVICE}, -@@ -5205,7 +5204,7 @@ static struct ioctl_desc ioctls[] = { - LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2}, - /* 0x34 */ {dxgkio_submit_command_to_hwqueue, LX_DXSUBMITCOMMANDTOHWQUEUE}, - /* 0x35 */ {dxgkio_submit_signal_to_hwqueue, -- LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE}, -+ LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE}, - /* 0x36 */ {dxgkio_submit_wait_to_hwqueue, - LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE}, - /* 0x37 */ {dxgkio_unlock2, LX_DXUNLOCK2}, --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1698-drivers-hv-dxgkrnl-Creation-of-dxgsyncfile-objects.patch b/patch/kernel/archive/wsl2-arm64-6.6/1698-drivers-hv-dxgkrnl-Creation-of-dxgsyncfile-objects.patch deleted file mode 100644 index 44e9b6778efd..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1698-drivers-hv-dxgkrnl-Creation-of-dxgsyncfile-objects.patch +++ /dev/null @@ -1,482 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 22 Mar 2022 11:02:49 -0700 -Subject: drivers: hv: dxgkrnl: Creation of dxgsyncfile objects - -Implement the ioctl to create a dxgsyncfile object -(LX_DXCREATESYNCFILE). This object is a wrapper around a monitored -fence sync object and a fence value. - -dxgsyncfile is built on top of the Linux sync_file object and -provides a way for the user mode to synchronize with the execution -of the device DMA packets. - -The ioctl creates a dxgsyncfile object for the given GPU synchronization -object and a fence value. A file descriptor of the sync_file object -is returned to the caller. The caller could wait for the object by using -poll(). When the underlying GPU synchronization object is signaled on -the host, the host sends a message to the virtual machine and the -sync_file object is signaled. 
- -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/Kconfig | 2 + - drivers/hv/dxgkrnl/Makefile | 2 +- - drivers/hv/dxgkrnl/dxgkrnl.h | 2 + - drivers/hv/dxgkrnl/dxgmodule.c | 12 + - drivers/hv/dxgkrnl/dxgsyncfile.c | 215 ++++++++++ - drivers/hv/dxgkrnl/dxgsyncfile.h | 30 ++ - drivers/hv/dxgkrnl/dxgvmbus.c | 33 +- - drivers/hv/dxgkrnl/ioctl.c | 5 +- - include/uapi/misc/d3dkmthk.h | 9 + - 9 files changed, 294 insertions(+), 16 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/Kconfig b/drivers/hv/dxgkrnl/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/Kconfig -+++ b/drivers/hv/dxgkrnl/Kconfig -@@ -6,6 +6,8 @@ config DXGKRNL - tristate "Microsoft Paravirtualized GPU support" - depends on HYPERV - depends on 64BIT || COMPILE_TEST -+ select DMA_SHARED_BUFFER -+ select SYNC_FILE - help - This driver supports paravirtualized virtual compute devices, exposed - by Microsoft Hyper-V when Linux is running inside of a virtual machine -diff --git a/drivers/hv/dxgkrnl/Makefile b/drivers/hv/dxgkrnl/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/Makefile -+++ b/drivers/hv/dxgkrnl/Makefile -@@ -2,4 +2,4 @@ - # Makefile for the hyper-v compute device driver (dxgkrnl). 
- - obj-$(CONFIG_DXGKRNL) += dxgkrnl.o --dxgkrnl-y := dxgmodule.o hmgr.o misc.o dxgadapter.o ioctl.o dxgvmbus.o dxgprocess.o -+dxgkrnl-y := dxgmodule.o hmgr.o misc.o dxgadapter.o ioctl.o dxgvmbus.o dxgprocess.o dxgsyncfile.o -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -120,6 +120,7 @@ struct dxgpagingqueue { - */ - enum dxghosteventtype { - dxghostevent_cpu_event = 1, -+ dxghostevent_dma_fence = 2, - }; - - struct dxghostevent { -@@ -858,6 +859,7 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - struct - d3dkmt_waitforsynchronizationobjectfromcpu - *args, -+ bool user_address, - u64 cpu_event); - int dxgvmb_send_lock2(struct dxgprocess *process, - struct dxgadapter *adapter, -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -16,6 +16,7 @@ - #include - #include - #include "dxgkrnl.h" -+#include "dxgsyncfile.h" - - #define PCI_VENDOR_ID_MICROSOFT 0x1414 - #define PCI_DEVICE_ID_VIRTUAL_RENDER 0x008E -@@ -145,6 +146,15 @@ void dxgglobal_remove_host_event(struct dxghostevent *event) - spin_unlock_irq(&dxgglobal->host_event_list_mutex); - } - -+static void signal_dma_fence(struct dxghostevent *eventhdr) -+{ -+ struct dxgsyncpoint *event = (struct dxgsyncpoint *)eventhdr; -+ -+ event->fence_value++; -+ list_del(&eventhdr->host_event_list_entry); -+ dma_fence_signal(&event->base); -+} -+ - void signal_host_cpu_event(struct dxghostevent *eventhdr) - { - struct dxghosteventcpu *event = (struct dxghosteventcpu *)eventhdr; -@@ -184,6 +194,8 @@ void dxgglobal_signal_host_event(u64 event_id) - DXG_TRACE("found event to signal"); - if (event->event_type == dxghostevent_cpu_event) - signal_host_cpu_event(event); -+ else if (event->event_type == dxghostevent_dma_fence) -+ 
signal_dma_fence(event); - else - DXG_ERR("Unknown host event type"); - break; -diff --git a/drivers/hv/dxgkrnl/dxgsyncfile.c b/drivers/hv/dxgkrnl/dxgsyncfile.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgsyncfile.c -@@ -0,0 +1,215 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. -+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Ioctl implementation -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "dxgkrnl.h" -+#include "dxgvmbus.h" -+#include "dxgsyncfile.h" -+ -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt -+ -+#ifdef DEBUG -+static char *errorstr(int ret) -+{ -+ return ret < 0 ? "err" : ""; -+} -+#endif -+ -+static const struct dma_fence_ops dxgdmafence_ops; -+ -+static struct dxgsyncpoint *to_syncpoint(struct dma_fence *fence) -+{ -+ if (fence->ops != &dxgdmafence_ops) -+ return NULL; -+ return container_of(fence, struct dxgsyncpoint, base); -+} -+ -+int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_createsyncfile args; -+ struct dxgsyncpoint *pt = NULL; -+ int ret = 0; -+ int fd = get_unused_fd_flags(O_CLOEXEC); -+ struct sync_file *sync_file = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dkmt_waitforsynchronizationobjectfromcpu waitargs = {}; -+ -+ if (fd < 0) { -+ DXG_ERR("get_unused_fd_flags failed: %d", fd); -+ ret = fd; -+ goto cleanup; -+ } -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EFAULT; -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ DXG_ERR("dxgprocess_device_by_handle failed"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ DXG_ERR("dxgdevice_acquire_lock_shared failed"); -+ 
device = NULL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ DXG_ERR("dxgadapter_acquire_lock_shared failed"); -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ pt = kzalloc(sizeof(*pt), GFP_KERNEL); -+ if (!pt) { -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ spin_lock_init(&pt->lock); -+ pt->fence_value = args.fence_value; -+ pt->context = dma_fence_context_alloc(1); -+ pt->hdr.event_id = dxgglobal_new_host_event_id(); -+ pt->hdr.event_type = dxghostevent_dma_fence; -+ dxgglobal_add_host_event(&pt->hdr); -+ -+ dma_fence_init(&pt->base, &dxgdmafence_ops, &pt->lock, -+ pt->context, args.fence_value); -+ -+ sync_file = sync_file_create(&pt->base); -+ if (sync_file == NULL) { -+ DXG_ERR("sync_file_create failed"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ dma_fence_put(&pt->base); -+ -+ waitargs.device = args.device; -+ waitargs.object_count = 1; -+ waitargs.objects = &args.monitored_fence; -+ waitargs.fence_values = &args.fence_value; -+ ret = dxgvmb_send_wait_sync_object_cpu(process, adapter, -+ &waitargs, false, -+ pt->hdr.event_id); -+ if (ret < 0) { -+ DXG_ERR("dxgvmb_send_wait_sync_object_cpu failed"); -+ goto cleanup; -+ } -+ -+ args.sync_file_handle = (u64)fd; -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EFAULT; -+ goto cleanup; -+ } -+ -+ fd_install(fd, sync_file->file); -+ -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) -+ dxgdevice_release_lock_shared(device); -+ if (ret) { -+ if (sync_file) { -+ fput(sync_file->file); -+ /* sync_file_release will destroy dma_fence */ -+ pt = NULL; -+ } -+ if (pt) -+ dma_fence_put(&pt->base); -+ if (fd >= 0) -+ put_unused_fd(fd); -+ } -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+static const char *dxgdmafence_get_driver_name(struct dma_fence *fence) -+{ -+ return "dxgkrnl"; -+} -+ -+static const char 
*dxgdmafence_get_timeline_name(struct dma_fence *fence) -+{ -+ return "no_timeline"; -+} -+ -+static void dxgdmafence_release(struct dma_fence *fence) -+{ -+ struct dxgsyncpoint *syncpoint; -+ -+ syncpoint = to_syncpoint(fence); -+ if (syncpoint) { -+ if (syncpoint->hdr.event_id) -+ dxgglobal_get_host_event(syncpoint->hdr.event_id); -+ kfree(syncpoint); -+ } -+} -+ -+static bool dxgdmafence_signaled(struct dma_fence *fence) -+{ -+ struct dxgsyncpoint *syncpoint; -+ -+ syncpoint = to_syncpoint(fence); -+ if (syncpoint == 0) -+ return true; -+ return __dma_fence_is_later(syncpoint->fence_value, fence->seqno, -+ fence->ops); -+} -+ -+static bool dxgdmafence_enable_signaling(struct dma_fence *fence) -+{ -+ return true; -+} -+ -+static void dxgdmafence_value_str(struct dma_fence *fence, -+ char *str, int size) -+{ -+ snprintf(str, size, "%lld", fence->seqno); -+} -+ -+static void dxgdmafence_timeline_value_str(struct dma_fence *fence, -+ char *str, int size) -+{ -+ struct dxgsyncpoint *syncpoint; -+ -+ syncpoint = to_syncpoint(fence); -+ snprintf(str, size, "%lld", syncpoint->fence_value); -+} -+ -+static const struct dma_fence_ops dxgdmafence_ops = { -+ .get_driver_name = dxgdmafence_get_driver_name, -+ .get_timeline_name = dxgdmafence_get_timeline_name, -+ .enable_signaling = dxgdmafence_enable_signaling, -+ .signaled = dxgdmafence_signaled, -+ .release = dxgdmafence_release, -+ .fence_value_str = dxgdmafence_value_str, -+ .timeline_value_str = dxgdmafence_timeline_value_str, -+}; -diff --git a/drivers/hv/dxgkrnl/dxgsyncfile.h b/drivers/hv/dxgkrnl/dxgsyncfile.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/hv/dxgkrnl/dxgsyncfile.h -@@ -0,0 +1,30 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+/* -+ * Copyright (c) 2022, Microsoft Corporation. 
-+ * -+ * Author: -+ * Iouri Tarassov -+ * -+ * Dxgkrnl Graphics Driver -+ * Headers for sync file objects -+ * -+ */ -+ -+#ifndef _DXGSYNCFILE_H -+#define _DXGSYNCFILE_H -+ -+#include -+ -+int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs); -+ -+struct dxgsyncpoint { -+ struct dxghostevent hdr; -+ struct dma_fence base; -+ u64 fence_value; -+ u64 context; -+ spinlock_t lock; -+ u64 u64; -+}; -+ -+#endif /* _DXGSYNCFILE_H */ -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -2820,6 +2820,7 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - struct - d3dkmt_waitforsynchronizationobjectfromcpu - *args, -+ bool user_address, - u64 cpu_event) - { - int ret = -EINVAL; -@@ -2844,19 +2845,25 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - command->guest_event_pointer = (u64) cpu_event; - current_pos = (u8 *) &command[1]; - -- ret = copy_from_user(current_pos, args->objects, object_size); -- if (ret) { -- DXG_ERR("failed to copy objects"); -- ret = -EINVAL; -- goto cleanup; -- } -- current_pos += object_size; -- ret = copy_from_user(current_pos, args->fence_values, -- fence_size); -- if (ret) { -- DXG_ERR("failed to copy fences"); -- ret = -EINVAL; -- goto cleanup; -+ if (user_address) { -+ ret = copy_from_user(current_pos, args->objects, object_size); -+ if (ret) { -+ DXG_ERR("failed to copy objects"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ current_pos += object_size; -+ ret = copy_from_user(current_pos, args->fence_values, -+ fence_size); -+ if (ret) { -+ DXG_ERR("failed to copy fences"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ } else { -+ memcpy(current_pos, args->objects, object_size); -+ current_pos += object_size; -+ memcpy(current_pos, args->fence_values, fence_size); - } - - ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size); -diff 
--git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -19,6 +19,7 @@ - - #include "dxgkrnl.h" - #include "dxgvmbus.h" -+#include "dxgsyncfile.h" - - #undef pr_fmt - #define pr_fmt(fmt) "dxgk: " fmt -@@ -3488,7 +3489,7 @@ dxgkio_wait_sync_object_cpu(struct dxgprocess *process, void *__user inargs) - } - - ret = dxgvmb_send_wait_sync_object_cpu(process, adapter, -- &args, event_id); -+ &args, true, event_id); - if (ret < 0) - goto cleanup; - -@@ -5224,7 +5225,7 @@ static struct ioctl_desc ioctls[] = { - /* 0x42 */ {dxgkio_open_resource_nt, LX_DXOPENRESOURCEFROMNTHANDLE}, - /* 0x43 */ {dxgkio_query_statistics, LX_DXQUERYSTATISTICS}, - /* 0x44 */ {dxgkio_share_object_with_host, LX_DXSHAREOBJECTWITHHOST}, --/* 0x45 */ {}, -+/* 0x45 */ {dxgkio_create_sync_file, LX_DXCREATESYNCFILE}, - }; - - /* -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -1554,6 +1554,13 @@ struct d3dkmt_shareobjectwithhost { - __u64 object_vail_nt_handle; - }; - -+struct d3dkmt_createsyncfile { -+ struct d3dkmthandle device; -+ struct d3dkmthandle monitored_fence; -+ __u64 fence_value; -+ __u64 sync_file_handle; /* out */ -+}; -+ - /* - * Dxgkrnl Graphics Port Driver ioctl definitions - * -@@ -1677,5 +1684,7 @@ struct d3dkmt_shareobjectwithhost { - _IOWR(0x47, 0x43, struct d3dkmt_querystatistics) - #define LX_DXSHAREOBJECTWITHHOST \ - _IOWR(0x47, 0x44, struct d3dkmt_shareobjectwithhost) -+#define LX_DXCREATESYNCFILE \ -+ _IOWR(0x47, 0x45, struct d3dkmt_createsyncfile) - - #endif /* _D3DKMTHK_H */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1699-drivers-hv-dxgkrnl-Use-tracing-instead-of-dev_dbg.patch b/patch/kernel/archive/wsl2-arm64-6.6/1699-drivers-hv-dxgkrnl-Use-tracing-instead-of-dev_dbg.patch deleted file mode 
100644 index 3a99408a496b..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1699-drivers-hv-dxgkrnl-Use-tracing-instead-of-dev_dbg.patch +++ /dev/null @@ -1,205 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Thu, 24 Mar 2022 15:03:41 -0700 -Subject: drivers: hv: dxgkrnl: Use tracing instead of dev_dbg - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 4 +-- - drivers/hv/dxgkrnl/dxgmodule.c | 5 ++- - drivers/hv/dxgkrnl/dxgprocess.c | 6 ++-- - drivers/hv/dxgkrnl/dxgvmbus.c | 4 +-- - drivers/hv/dxgkrnl/hmgr.c | 16 +++++----- - drivers/hv/dxgkrnl/ioctl.c | 8 ++--- - drivers/hv/dxgkrnl/misc.c | 4 +-- - 7 files changed, 25 insertions(+), 22 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -18,8 +18,8 @@ - - #include "dxgkrnl.h" - --#undef pr_fmt --#define pr_fmt(fmt) "dxgk: " fmt -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt - - int dxgadapter_set_vmbus(struct dxgadapter *adapter, struct hv_device *hdev) - { -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -24,6 +24,9 @@ - #undef pr_fmt - #define pr_fmt(fmt) "dxgk: " fmt - -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt -+ - /* - * Interface from dxgglobal - */ -@@ -442,7 +445,7 @@ const struct file_operations dxgk_fops = { - #define DXGK_VMBUS_HOSTCAPS_OFFSET (DXGK_VMBUS_VGPU_LUID_OFFSET + \ - sizeof(struct winluid)) - --/* The guest writes its capavilities to this adderss */ -+/* The guest writes its capabilities to this address */ - #define DXGK_VMBUS_GUESTCAPS_OFFSET (DXGK_VMBUS_VERSION_OFFSET + \ - sizeof(u32)) - -diff --git 
a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -13,8 +13,8 @@ - - #include "dxgkrnl.h" - --#undef pr_fmt --#define pr_fmt(fmt) "dxgk: " fmt -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt - - /* - * Creates a new dxgprocess object -@@ -248,7 +248,7 @@ struct dxgadapter *dxgprocess_adapter_by_handle(struct dxgprocess *process, - HMGRENTRY_TYPE_DXGADAPTER, - handle); - if (adapter == NULL) -- DXG_ERR("adapter_by_handle failed %x", handle.v); -+ DXG_TRACE("adapter_by_handle failed %x", handle.v); - else if (kref_get_unless_zero(&adapter->adapter_kref) == 0) { - DXG_ERR("failed to acquire adapter reference"); - adapter = NULL; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -22,8 +22,8 @@ - #include "dxgkrnl.h" - #include "dxgvmbus.h" - --#undef pr_fmt --#define pr_fmt(fmt) "dxgk: " fmt -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt - - #define RING_BUFSIZE (256 * 1024) - -diff --git a/drivers/hv/dxgkrnl/hmgr.c b/drivers/hv/dxgkrnl/hmgr.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/hmgr.c -+++ b/drivers/hv/dxgkrnl/hmgr.c -@@ -19,8 +19,8 @@ - #include "dxgkrnl.h" - #include "hmgr.h" - --#undef pr_fmt --#define pr_fmt(fmt) "dxgk: " fmt -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt - - const struct d3dkmthandle zerohandle; - -@@ -90,29 +90,29 @@ static bool is_handle_valid(struct hmgrtable *table, struct d3dkmthandle h, - struct hmgrentry *entry; - - if (index >= table->table_size) { -- DXG_ERR("Invalid index %x %d", h.v, index); -+ DXG_TRACE("Invalid index %x %d", h.v, index); - return false; - } - - entry = &table->entry_table[index]; - if (unique != entry->unique) { -- DXG_ERR("Invalid unique %x %d %d %d %p", -+ DXG_TRACE("Invalid unique %x %d 
%d %d %p", - h.v, unique, entry->unique, index, entry->object); - return false; - } - - if (entry->destroyed && !ignore_destroyed) { -- DXG_ERR("Invalid destroyed value"); -+ DXG_TRACE("Invalid destroyed value"); - return false; - } - - if (entry->type == HMGRENTRY_TYPE_FREE) { -- DXG_ERR("Entry is freed %x %d", h.v, index); -+ DXG_TRACE("Entry is freed %x %d", h.v, index); - return false; - } - - if (t != HMGRENTRY_TYPE_FREE && t != entry->type) { -- DXG_ERR("type mismatch %x %d %d", h.v, t, entry->type); -+ DXG_TRACE("type mismatch %x %d %d", h.v, t, entry->type); - return false; - } - -@@ -500,7 +500,7 @@ void *hmgrtable_get_object_by_type(struct hmgrtable *table, - struct d3dkmthandle h) - { - if (!is_handle_valid(table, h, false, type)) { -- DXG_ERR("Invalid handle %x", h.v); -+ DXG_TRACE("Invalid handle %x", h.v); - return NULL; - } - return table->entry_table[get_index(h)].object; -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -21,8 +21,8 @@ - #include "dxgvmbus.h" - #include "dxgsyncfile.h" - --#undef pr_fmt --#define pr_fmt(fmt) "dxgk: " fmt -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt - - struct ioctl_desc { - int (*ioctl_callback)(struct dxgprocess *p, void __user *arg); -@@ -556,7 +556,7 @@ dxgkio_enum_adapters3(struct dxgprocess *process, void *__user inargs) - - cleanup: - -- DXG_TRACE("ioctl: %s %d", errorstr(ret), ret); -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); - return ret; - } - -@@ -5242,7 +5242,7 @@ static int dxgk_ioctl(struct file *f, unsigned int p1, unsigned long p2) - int status; - struct dxgprocess *process; - -- if (code < 1 || code >= ARRAY_SIZE(ioctls)) { -+ if (code < 1 || code >= ARRAY_SIZE(ioctls)) { - DXG_ERR("bad ioctl %x %x %x %x", - code, _IOC_TYPE(p1), _IOC_SIZE(p1), _IOC_DIR(p1)); - return -ENOTTY; -diff --git a/drivers/hv/dxgkrnl/misc.c b/drivers/hv/dxgkrnl/misc.c -index 
111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.c -+++ b/drivers/hv/dxgkrnl/misc.c -@@ -18,8 +18,8 @@ - #include "dxgkrnl.h" - #include "misc.h" - --#undef pr_fmt --#define pr_fmt(fmt) "dxgk: " fmt -+#undef dev_fmt -+#define dev_fmt(fmt) "dxgk: " fmt - - u16 *wcsncpy(u16 *dest, const u16 *src, size_t n) - { --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1700-drivers-hv-dxgkrnl-Implement-D3DKMTWaitSyncFile.patch b/patch/kernel/archive/wsl2-arm64-6.6/1700-drivers-hv-dxgkrnl-Implement-D3DKMTWaitSyncFile.patch deleted file mode 100644 index d2c43649bad4..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1700-drivers-hv-dxgkrnl-Implement-D3DKMTWaitSyncFile.patch +++ /dev/null @@ -1,658 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Mon, 2 May 2022 11:46:48 -0700 -Subject: drivers: hv: dxgkrnl: Implement D3DKMTWaitSyncFile - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 11 + - drivers/hv/dxgkrnl/dxgmodule.c | 7 +- - drivers/hv/dxgkrnl/dxgprocess.c | 12 +- - drivers/hv/dxgkrnl/dxgsyncfile.c | 291 +++++++++- - drivers/hv/dxgkrnl/dxgsyncfile.h | 3 + - drivers/hv/dxgkrnl/dxgvmbus.c | 49 ++ - drivers/hv/dxgkrnl/ioctl.c | 16 +- - include/uapi/misc/d3dkmthk.h | 23 + - 8 files changed, 396 insertions(+), 16 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -254,6 +254,10 @@ void dxgsharedsyncobj_add_syncobj(struct dxgsharedsyncobject *sharedsyncobj, - struct dxgsyncobject *syncobj); - void dxgsharedsyncobj_remove_syncobj(struct dxgsharedsyncobject *sharedsyncobj, - struct dxgsyncobject *syncobj); -+int dxgsharedsyncobj_get_host_nt_handle(struct dxgsharedsyncobject *syncobj, -+ struct dxgprocess *process, -+ struct d3dkmthandle objecthandle); 
-+void dxgsharedsyncobj_put(struct dxgsharedsyncobject *syncobj); - - struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process, - struct dxgdevice *device, -@@ -384,6 +388,8 @@ struct dxgprocess { - pid_t tgid; - /* how many time the process was opened */ - struct kref process_kref; -+ /* protects the object memory */ -+ struct kref process_mem_kref; - /* - * This handle table is used for all objects except dxgadapter - * The handle table lock order is higher than the local_handle_table -@@ -405,6 +411,7 @@ struct dxgprocess { - struct dxgprocess *dxgprocess_create(void); - void dxgprocess_destroy(struct dxgprocess *process); - void dxgprocess_release(struct kref *refcount); -+void dxgprocess_mem_release(struct kref *refcount); - int dxgprocess_open_adapter(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmthandle *handle); -@@ -932,6 +939,10 @@ int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, - struct d3dkmt_opensyncobjectfromnthandle2 - *args, - struct dxgsyncobject *syncobj); -+int dxgvmb_send_open_sync_object(struct dxgprocess *process, -+ struct d3dkmthandle device, -+ struct d3dkmthandle host_shared_syncobj, -+ struct d3dkmthandle *syncobj); - int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - struct dxgadapter *adapter, - struct d3dkmt_queryallocationresidency -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -149,10 +149,11 @@ void dxgglobal_remove_host_event(struct dxghostevent *event) - spin_unlock_irq(&dxgglobal->host_event_list_mutex); - } - --static void signal_dma_fence(struct dxghostevent *eventhdr) -+static void dxg_signal_dma_fence(struct dxghostevent *eventhdr) - { - struct dxgsyncpoint *event = (struct dxgsyncpoint *)eventhdr; - -+ DXG_TRACE("syncpoint: %px, fence: %lld", event, event->fence_value); - event->fence_value++; - 
list_del(&eventhdr->host_event_list_entry); - dma_fence_signal(&event->base); -@@ -198,7 +199,7 @@ void dxgglobal_signal_host_event(u64 event_id) - if (event->event_type == dxghostevent_cpu_event) - signal_host_cpu_event(event); - else if (event->event_type == dxghostevent_dma_fence) -- signal_dma_fence(event); -+ dxg_signal_dma_fence(event); - else - DXG_ERR("Unknown host event type"); - break; -@@ -355,6 +356,7 @@ static struct dxgprocess *dxgglobal_get_current_process(void) - if (entry->tgid == current->tgid) { - if (kref_get_unless_zero(&entry->process_kref)) { - process = entry; -+ kref_get(&entry->process_mem_kref); - DXG_TRACE("found dxgprocess"); - } else { - DXG_TRACE("process is destroyed"); -@@ -405,6 +407,7 @@ static int dxgk_release(struct inode *n, struct file *f) - return -EINVAL; - - kref_put(&process->process_kref, dxgprocess_release); -+ kref_put(&process->process_mem_kref, dxgprocess_mem_release); - - f->private_data = NULL; - return 0; -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -39,6 +39,7 @@ struct dxgprocess *dxgprocess_create(void) - } else { - INIT_LIST_HEAD(&process->plistentry); - kref_init(&process->process_kref); -+ kref_init(&process->process_mem_kref); - - mutex_lock(&dxgglobal->plistmutex); - list_add_tail(&process->plistentry, -@@ -117,8 +118,17 @@ void dxgprocess_release(struct kref *refcount) - - dxgprocess_destroy(process); - -- if (process->host_handle.v) -+ if (process->host_handle.v) { - dxgvmb_send_destroy_process(process->host_handle); -+ process->host_handle.v = 0; -+ } -+} -+ -+void dxgprocess_mem_release(struct kref *refcount) -+{ -+ struct dxgprocess *process; -+ -+ process = container_of(refcount, struct dxgprocess, process_mem_kref); - kfree(process); - } - -diff --git a/drivers/hv/dxgkrnl/dxgsyncfile.c b/drivers/hv/dxgkrnl/dxgsyncfile.c -index 
111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgsyncfile.c -+++ b/drivers/hv/dxgkrnl/dxgsyncfile.c -@@ -9,6 +9,20 @@ - * Dxgkrnl Graphics Driver - * Ioctl implementation - * -+ * dxgsyncpoint: -+ * - pointer to dxgsharedsyncobject -+ * - host_shared_handle_nt_reference incremented -+ * - list of (process, local syncobj d3dkmthandle) pairs -+ * wait for sync file -+ * - get dxgsyncpoint -+ * - if process doesn't have a local syncobj -+ * - create local dxgsyncobject -+ * - send open syncobj to the host -+ * - Send wait for syncobj to the context -+ * dxgsyncpoint destruction -+ * - walk the list of (process, local syncobj) -+ * - destroy syncobj -+ * - remove reference to dxgsharedsyncobject - */ - - #include -@@ -45,12 +59,15 @@ int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) - struct d3dkmt_createsyncfile args; - struct dxgsyncpoint *pt = NULL; - int ret = 0; -- int fd = get_unused_fd_flags(O_CLOEXEC); -+ int fd; - struct sync_file *sync_file = NULL; - struct dxgdevice *device = NULL; - struct dxgadapter *adapter = NULL; -+ struct dxgsyncobject *syncobj = NULL; - struct d3dkmt_waitforsynchronizationobjectfromcpu waitargs = {}; -+ bool device_lock_acquired = false; - -+ fd = get_unused_fd_flags(O_CLOEXEC); - if (fd < 0) { - DXG_ERR("get_unused_fd_flags failed: %d", fd); - ret = fd; -@@ -74,9 +91,9 @@ int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) - ret = dxgdevice_acquire_lock_shared(device); - if (ret < 0) { - DXG_ERR("dxgdevice_acquire_lock_shared failed"); -- device = NULL; - goto cleanup; - } -+ device_lock_acquired = true; - - adapter = device->adapter; - ret = dxgadapter_acquire_lock_shared(adapter); -@@ -109,6 +126,30 @@ int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) - } - dma_fence_put(&pt->base); - -+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED); -+ syncobj = hmgrtable_get_object(&process->handle_table, -+ args.monitored_fence); -+ if (syncobj 
== NULL) { -+ DXG_ERR("invalid syncobj handle %x", args.monitored_fence.v); -+ ret = -EINVAL; -+ } else { -+ if (syncobj->shared) { -+ kref_get(&syncobj->syncobj_kref); -+ pt->shared_syncobj = syncobj->shared_owner; -+ } -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED); -+ -+ if (pt->shared_syncobj) { -+ ret = dxgsharedsyncobj_get_host_nt_handle(pt->shared_syncobj, -+ process, -+ args.monitored_fence); -+ if (ret) -+ pt->shared_syncobj = NULL; -+ } -+ if (ret) -+ goto cleanup; -+ - waitargs.device = args.device; - waitargs.object_count = 1; - waitargs.objects = &args.monitored_fence; -@@ -132,10 +173,15 @@ int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) - fd_install(fd, sync_file->file); - - cleanup: -+ if (syncobj && syncobj->shared) -+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release); - if (adapter) - dxgadapter_release_lock_shared(adapter); -- if (device) -- dxgdevice_release_lock_shared(device); -+ if (device) { -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } - if (ret) { - if (sync_file) { - fput(sync_file->file); -@@ -151,6 +197,228 @@ int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) - return ret; - } - -+int dxgkio_open_syncobj_from_syncfile(struct dxgprocess *process, -+ void *__user inargs) -+{ -+ struct d3dkmt_opensyncobjectfromsyncfile args; -+ int ret = 0; -+ struct dxgsyncpoint *pt = NULL; -+ struct dma_fence *dmafence = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct dxgsyncobject *syncobj = NULL; -+ struct d3dddi_synchronizationobject_flags flags = { }; -+ struct d3dkmt_opensyncobjectfromnthandle2 openargs = { }; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EFAULT; -+ goto cleanup; -+ } -+ -+ dmafence = 
sync_file_get_fence(args.sync_file_handle); -+ if (dmafence == NULL) { -+ DXG_ERR("failed to get dmafence from handle: %llx", -+ args.sync_file_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ pt = to_syncpoint(dmafence); -+ if (pt->shared_syncobj == NULL) { -+ DXG_ERR("Sync object is not shared"); -+ goto cleanup; -+ } -+ -+ device = dxgprocess_device_by_handle(process, args.device); -+ if (device == NULL) { -+ DXG_ERR("dxgprocess_device_by_handle failed"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ DXG_ERR("dxgdevice_acquire_lock_shared failed"); -+ kref_put(&device->device_kref, dxgdevice_release); -+ device = NULL; -+ goto cleanup; -+ } -+ -+ adapter = device->adapter; -+ ret = dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ DXG_ERR("dxgadapter_acquire_lock_shared failed"); -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ flags.shared = 1; -+ flags.nt_security_sharing = 1; -+ syncobj = dxgsyncobject_create(process, device, adapter, -+ _D3DDDI_MONITORED_FENCE, flags); -+ if (syncobj == NULL) { -+ DXG_ERR("failed to create sync object"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ dxgsharedsyncobj_add_syncobj(pt->shared_syncobj, syncobj); -+ -+ /* Open the shared syncobj to get a local handle */ -+ -+ openargs.device = device->handle; -+ openargs.flags.shared = 1; -+ openargs.flags.nt_security_sharing = 1; -+ openargs.flags.no_signal = 1; -+ -+ ret = dxgvmb_send_open_sync_object_nt(process, -+ &dxgglobal->channel, &openargs, syncobj); -+ if (ret) { -+ DXG_ERR("Failed to open shared syncobj on host"); -+ goto cleanup; -+ } -+ -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, -+ syncobj, -+ HMGRENTRY_TYPE_DXGSYNCOBJECT, -+ openargs.sync_object); -+ if (ret == 0) { -+ syncobj->handle = openargs.sync_object; -+ kref_get(&syncobj->syncobj_kref); -+ } -+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -+ -+ 
args.syncobj = openargs.sync_object; -+ args.fence_value = pt->fence_value; -+ args.fence_value_cpu_va = openargs.monitored_fence.fence_value_cpu_va; -+ args.fence_value_gpu_va = openargs.monitored_fence.fence_value_gpu_va; -+ -+ ret = copy_to_user(inargs, &args, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy output args"); -+ ret = -EFAULT; -+ } -+ -+cleanup: -+ if (dmafence) -+ dma_fence_put(dmafence); -+ if (ret) { -+ if (syncobj) { -+ dxgsyncobject_destroy(process, syncobj); -+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release); -+ } -+ } -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) { -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ -+int dxgkio_wait_sync_file(struct dxgprocess *process, void *__user inargs) -+{ -+ struct d3dkmt_waitsyncfile args; -+ struct dma_fence *dmafence = NULL; -+ int ret = 0; -+ struct dxgsyncpoint *pt = NULL; -+ struct dxgdevice *device = NULL; -+ struct dxgadapter *adapter = NULL; -+ struct d3dkmthandle syncobj_handle = {}; -+ bool device_lock_acquired = false; -+ -+ ret = copy_from_user(&args, inargs, sizeof(args)); -+ if (ret) { -+ DXG_ERR("failed to copy input args"); -+ ret = -EFAULT; -+ goto cleanup; -+ } -+ -+ dmafence = sync_file_get_fence(args.sync_file_handle); -+ if (dmafence == NULL) { -+ DXG_ERR("failed to get dmafence from handle: %llx", -+ args.sync_file_handle); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ pt = to_syncpoint(dmafence); -+ -+ device = dxgprocess_device_by_object_handle(process, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ args.context); -+ if (device == NULL) { -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ret = dxgdevice_acquire_lock_shared(device); -+ if (ret < 0) { -+ DXG_ERR("dxgdevice_acquire_lock_shared failed"); -+ device = NULL; -+ goto cleanup; -+ } -+ device_lock_acquired = true; -+ -+ adapter = device->adapter; -+ ret = 
dxgadapter_acquire_lock_shared(adapter); -+ if (ret < 0) { -+ DXG_ERR("dxgadapter_acquire_lock_shared failed"); -+ adapter = NULL; -+ goto cleanup; -+ } -+ -+ /* Open the shared syncobj to get a local handle */ -+ if (pt->shared_syncobj == NULL) { -+ DXG_ERR("Sync object is not shared"); -+ goto cleanup; -+ } -+ ret = dxgvmb_send_open_sync_object(process, -+ device->handle, -+ pt->shared_syncobj->host_shared_handle, -+ &syncobj_handle); -+ if (ret) { -+ DXG_ERR("Failed to open shared syncobj on host"); -+ goto cleanup; -+ } -+ -+ /* Ask the host to insert the syncobj to the context queue */ -+ ret = dxgvmb_send_wait_sync_object_gpu(process, adapter, -+ args.context, 1, -+ &syncobj_handle, -+ &pt->fence_value, -+ false); -+ if (ret < 0) { -+ DXG_ERR("dxgvmb_send_wait_sync_object_cpu failed"); -+ goto cleanup; -+ } -+ -+ /* -+ * Destroy the local syncobject immediately. This will not unblock -+ * GPU waiters, but will unblock CPU waiter, which includes the sync -+ * file itself. -+ */ -+ ret = dxgvmb_send_destroy_sync_object(process, syncobj_handle); -+ -+cleanup: -+ if (adapter) -+ dxgadapter_release_lock_shared(adapter); -+ if (device) { -+ if (device_lock_acquired) -+ dxgdevice_release_lock_shared(device); -+ kref_put(&device->device_kref, dxgdevice_release); -+ } -+ if (dmafence) -+ dma_fence_put(dmafence); -+ -+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ return ret; -+} -+ - static const char *dxgdmafence_get_driver_name(struct dma_fence *fence) - { - return "dxgkrnl"; -@@ -166,11 +434,16 @@ static void dxgdmafence_release(struct dma_fence *fence) - struct dxgsyncpoint *syncpoint; - - syncpoint = to_syncpoint(fence); -- if (syncpoint) { -- if (syncpoint->hdr.event_id) -- dxgglobal_get_host_event(syncpoint->hdr.event_id); -- kfree(syncpoint); -- } -+ if (syncpoint == NULL) -+ return; -+ -+ if (syncpoint->hdr.event_id) -+ dxgglobal_get_host_event(syncpoint->hdr.event_id); -+ -+ if (syncpoint->shared_syncobj) -+ 
dxgsharedsyncobj_put(syncpoint->shared_syncobj); -+ -+ kfree(syncpoint); - } - - static bool dxgdmafence_signaled(struct dma_fence *fence) -diff --git a/drivers/hv/dxgkrnl/dxgsyncfile.h b/drivers/hv/dxgkrnl/dxgsyncfile.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgsyncfile.h -+++ b/drivers/hv/dxgkrnl/dxgsyncfile.h -@@ -17,10 +17,13 @@ - #include - - int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs); -+int dxgkio_wait_sync_file(struct dxgprocess *process, void *__user inargs); -+int dxgkio_open_syncobj_from_syncfile(struct dxgprocess *p, void *__user args); - - struct dxgsyncpoint { - struct dxghostevent hdr; - struct dma_fence base; -+ struct dxgsharedsyncobject *shared_syncobj; - u64 fence_value; - u64 context; - spinlock_t lock; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -796,6 +796,55 @@ int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process, - return ret; - } - -+int dxgvmb_send_open_sync_object(struct dxgprocess *process, -+ struct d3dkmthandle device, -+ struct d3dkmthandle host_shared_syncobj, -+ struct d3dkmthandle *syncobj) -+{ -+ struct dxgkvmb_command_opensyncobject *command; -+ struct dxgkvmb_command_opensyncobject_return result = { }; -+ int ret; -+ struct dxgvmbusmsg msg; -+ struct dxgglobal *dxgglobal = dxggbl(); -+ -+ ret = init_message(&msg, NULL, process, sizeof(*command)); -+ if (ret) -+ return ret; -+ command = (void *)msg.msg; -+ -+ command_vm_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_OPENSYNCOBJECT, -+ process->host_handle); -+ command->device = device; -+ command->global_sync_object = host_shared_syncobj; -+ command->flags.shared = 1; -+ command->flags.nt_security_sharing = 1; -+ command->flags.no_signal = 1; -+ -+ ret = dxgglobal_acquire_channel_lock(); -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = 
dxgvmb_send_sync_msg(&dxgglobal->channel, msg.hdr, msg.size, -+ &result, sizeof(result)); -+ -+ dxgglobal_release_channel_lock(); -+ -+ if (ret < 0) -+ goto cleanup; -+ -+ ret = ntstatus2int(result.status); -+ if (ret < 0) -+ goto cleanup; -+ -+ *syncobj = result.sync_object; -+ -+cleanup: -+ free_message(&msg, process); -+ if (ret) -+ DXG_TRACE("err: %d", ret); -+ return ret; -+} -+ - int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process, - struct d3dkmthandle object, - struct d3dkmthandle *shared_handle) -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -36,10 +36,8 @@ static char *errorstr(int ret) - } - #endif - --static int dxgsyncobj_release(struct inode *inode, struct file *file) -+void dxgsharedsyncobj_put(struct dxgsharedsyncobject *syncobj) - { -- struct dxgsharedsyncobject *syncobj = file->private_data; -- - DXG_TRACE("Release syncobj: %p", syncobj); - mutex_lock(&syncobj->fd_mutex); - kref_get(&syncobj->ssyncobj_kref); -@@ -56,6 +54,13 @@ static int dxgsyncobj_release(struct inode *inode, struct file *file) - } - mutex_unlock(&syncobj->fd_mutex); - kref_put(&syncobj->ssyncobj_kref, dxgsharedsyncobj_release); -+} -+ -+static int dxgsyncobj_release(struct inode *inode, struct file *file) -+{ -+ struct dxgsharedsyncobject *syncobj = file->private_data; -+ -+ dxgsharedsyncobj_put(syncobj); - return 0; - } - -@@ -4478,7 +4483,7 @@ dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) - return ret; - } - --static int -+int - dxgsharedsyncobj_get_host_nt_handle(struct dxgsharedsyncobject *syncobj, - struct dxgprocess *process, - struct d3dkmthandle objecthandle) -@@ -5226,6 +5231,9 @@ static struct ioctl_desc ioctls[] = { - /* 0x43 */ {dxgkio_query_statistics, LX_DXQUERYSTATISTICS}, - /* 0x44 */ {dxgkio_share_object_with_host, LX_DXSHAREOBJECTWITHHOST}, - /* 0x45 */ {dxgkio_create_sync_file, 
LX_DXCREATESYNCFILE}, -+/* 0x46 */ {dxgkio_wait_sync_file, LX_DXWAITSYNCFILE}, -+/* 0x46 */ {dxgkio_open_syncobj_from_syncfile, -+ LX_DXOPENSYNCOBJECTFROMSYNCFILE}, - }; - - /* -diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h -index 111111111111..222222222222 100644 ---- a/include/uapi/misc/d3dkmthk.h -+++ b/include/uapi/misc/d3dkmthk.h -@@ -1561,6 +1561,25 @@ struct d3dkmt_createsyncfile { - __u64 sync_file_handle; /* out */ - }; - -+struct d3dkmt_waitsyncfile { -+ __u64 sync_file_handle; -+ struct d3dkmthandle context; -+ __u32 reserved; -+}; -+ -+struct d3dkmt_opensyncobjectfromsyncfile { -+ __u64 sync_file_handle; -+ struct d3dkmthandle device; -+ struct d3dkmthandle syncobj; /* out */ -+ __u64 fence_value; /* out */ -+#ifdef __KERNEL__ -+ void *fence_value_cpu_va; /* out */ -+#else -+ __u64 fence_value_cpu_va; /* out */ -+#endif -+ __u64 fence_value_gpu_va; /* out */ -+}; -+ - /* - * Dxgkrnl Graphics Port Driver ioctl definitions - * -@@ -1686,5 +1705,9 @@ struct d3dkmt_createsyncfile { - _IOWR(0x47, 0x44, struct d3dkmt_shareobjectwithhost) - #define LX_DXCREATESYNCFILE \ - _IOWR(0x47, 0x45, struct d3dkmt_createsyncfile) -+#define LX_DXWAITSYNCFILE \ -+ _IOWR(0x47, 0x46, struct d3dkmt_waitsyncfile) -+#define LX_DXOPENSYNCOBJECTFROMSYNCFILE \ -+ _IOWR(0x47, 0x47, struct d3dkmt_opensyncobjectfromsyncfile) - - #endif /* _D3DKMTHK_H */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1701-drivers-hv-dxgkrnl-Improve-tracing-and-return-values-from-copy-from-user.patch b/patch/kernel/archive/wsl2-arm64-6.6/1701-drivers-hv-dxgkrnl-Improve-tracing-and-return-values-from-copy-from-user.patch deleted file mode 100644 index 572b86f65edd..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1701-drivers-hv-dxgkrnl-Improve-tracing-and-return-values-from-copy-from-user.patch +++ /dev/null @@ -1,2000 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Fri, 6 May 2022 19:19:09 
-0700 -Subject: drivers: hv: dxgkrnl: Improve tracing and return values from copy - from user - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgkrnl.h | 17 +- - drivers/hv/dxgkrnl/dxgmodule.c | 1 + - drivers/hv/dxgkrnl/dxgsyncfile.c | 13 +- - drivers/hv/dxgkrnl/dxgvmbus.c | 98 +-- - drivers/hv/dxgkrnl/ioctl.c | 327 +++++----- - 5 files changed, 225 insertions(+), 231 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -999,18 +999,25 @@ void dxgk_validate_ioctls(void); - trace_printk(dev_fmt(fmt) "\n", ##__VA_ARGS__); \ - } while (0) - --#define DXG_ERR(fmt, ...) do { \ -- dev_err(DXGDEV, fmt, ##__VA_ARGS__); \ -- trace_printk("*** dxgkerror *** " dev_fmt(fmt) "\n", ##__VA_ARGS__); \ -+#define DXG_ERR(fmt, ...) do { \ -+ dev_err(DXGDEV, "%s: " fmt, __func__, ##__VA_ARGS__); \ -+ trace_printk("*** dxgkerror *** " dev_fmt(fmt) "\n", ##__VA_ARGS__); \ - } while (0) - - #else - - #define DXG_TRACE(...) --#define DXG_ERR(fmt, ...) do { \ -- dev_err(DXGDEV, fmt, ##__VA_ARGS__); \ -+#define DXG_ERR(fmt, ...) 
do { \ -+ dev_err(DXGDEV, "%s: " fmt, __func__, ##__VA_ARGS__); \ - } while (0) - - #endif /* DEBUG */ - -+#define DXG_TRACE_IOCTL_END(ret) do { \ -+ if (ret < 0) \ -+ DXG_ERR("Ioctl failed: %d", ret); \ -+ else \ -+ DXG_TRACE("Ioctl returned: %d", ret); \ -+} while (0) -+ - #endif -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -961,3 +961,4 @@ module_exit(dxg_drv_exit); - - MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Microsoft Dxgkrnl virtual compute device Driver"); -+MODULE_VERSION("2.0.0"); -diff --git a/drivers/hv/dxgkrnl/dxgsyncfile.c b/drivers/hv/dxgkrnl/dxgsyncfile.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgsyncfile.c -+++ b/drivers/hv/dxgkrnl/dxgsyncfile.c -@@ -38,13 +38,6 @@ - #undef dev_fmt - #define dev_fmt(fmt) "dxgk: " fmt - --#ifdef DEBUG --static char *errorstr(int ret) --{ -- return ret < 0 ? 
"err" : ""; --} --#endif -- - static const struct dma_fence_ops dxgdmafence_ops; - - static struct dxgsyncpoint *to_syncpoint(struct dma_fence *fence) -@@ -193,7 +186,7 @@ int dxgkio_create_sync_file(struct dxgprocess *process, void *__user inargs) - if (fd >= 0) - put_unused_fd(fd); - } -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -317,7 +310,7 @@ int dxgkio_open_syncobj_from_syncfile(struct dxgprocess *process, - kref_put(&device->device_kref, dxgdevice_release); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -415,7 +408,7 @@ int dxgkio_wait_sync_file(struct dxgprocess *process, void *__user inargs) - if (dmafence) - dma_fence_put(dmafence); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1212,7 +1212,7 @@ dxgvmb_send_create_context(struct dxgadapter *adapter, - args->priv_drv_data_size); - if (ret) { - DXG_ERR("Faled to copy private data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -1230,7 +1230,7 @@ dxgvmb_send_create_context(struct dxgadapter *adapter, - if (ret) { - DXG_ERR( - "Faled to copy private data to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - dxgvmb_send_destroy_context(adapter, process, - context); - context.v = 0; -@@ -1365,7 +1365,7 @@ copy_private_data(struct d3dkmt_createallocation *args, - args->private_runtime_data_size); - if (ret) { - DXG_ERR("failed to copy runtime data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - private_data_dest += args->private_runtime_data_size; -@@ -1385,7 +1385,7 @@ copy_private_data(struct d3dkmt_createallocation *args, - args->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy private data"); -- ret = 
-EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - private_data_dest += args->priv_drv_data_size; -@@ -1406,7 +1406,7 @@ copy_private_data(struct d3dkmt_createallocation *args, - input_alloc->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy alloc data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - private_data_dest += input_alloc->priv_drv_data_size; -@@ -1658,7 +1658,7 @@ create_local_allocations(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy resource handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -1690,7 +1690,7 @@ create_local_allocations(struct dxgprocess *process, - host_alloc->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy private data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - alloc_private_data += host_alloc->priv_drv_data_size; -@@ -1700,7 +1700,7 @@ create_local_allocations(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy alloc handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -1714,7 +1714,7 @@ create_local_allocations(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy global share"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -1961,7 +1961,7 @@ int dxgvmb_send_query_clock_calibration(struct dxgprocess *process, - sizeof(result.clock_data)); - if (ret) { - DXG_ERR("failed to copy clock data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = ntstatus2int(result.status); -@@ -2041,7 +2041,7 @@ int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - alloc_size); - if (ret) { - DXG_ERR("failed to copy alloc handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -2059,7 +2059,7 @@ int dxgvmb_send_query_alloc_residency(struct dxgprocess *process, - result_allocation_size); - if (ret) { - DXG_ERR("failed to copy residency status"); -- ret = -EINVAL; 
-+ ret = -EFAULT; - } - - cleanup: -@@ -2105,7 +2105,7 @@ int dxgvmb_send_escape(struct dxgprocess *process, - args->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy priv data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -2164,14 +2164,14 @@ int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - sizeof(output->budget)); - if (ret) { - DXG_ERR("failed to copy budget"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(&output->current_usage, &result.current_usage, - sizeof(output->current_usage)); - if (ret) { - DXG_ERR("failed to copy current usage"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(&output->current_reservation, -@@ -2179,7 +2179,7 @@ int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - sizeof(output->current_reservation)); - if (ret) { - DXG_ERR("failed to copy reservation"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(&output->available_for_reservation, -@@ -2187,7 +2187,7 @@ int dxgvmb_send_query_vidmem_info(struct dxgprocess *process, - sizeof(output->available_for_reservation)); - if (ret) { - DXG_ERR("failed to copy avail reservation"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -2229,7 +2229,7 @@ int dxgvmb_send_get_device_state(struct dxgprocess *process, - ret = copy_to_user(output, &result.args, sizeof(result.args)); - if (ret) { - DXG_ERR("failed to copy output args"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - if (args->state_type == _D3DKMT_DEVICESTATE_EXECUTION) -@@ -2404,7 +2404,7 @@ int dxgvmb_send_make_resident(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy alloc handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - command_vgpu_to_host_init2(&command->hdr, -@@ -2454,7 +2454,7 @@ int dxgvmb_send_evict(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy alloc 
handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - command_vgpu_to_host_init2(&command->hdr, -@@ -2502,14 +2502,14 @@ int dxgvmb_send_submit_command(struct dxgprocess *process, - hbufsize); - if (ret) { - DXG_ERR(" failed to copy history buffer"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_from_user((u8 *) &command[1] + hbufsize, - args->priv_drv_data, args->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy history priv data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2671,7 +2671,7 @@ int dxgvmb_send_update_gpu_va(struct dxgprocess *process, - op_size); - if (ret) { - DXG_ERR("failed to copy operations"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2751,7 +2751,7 @@ dxgvmb_send_create_sync_object(struct dxgprocess *process, - sizeof(u64)); - if (ret) { - DXG_ERR("failed to read fence"); -- ret = -EINVAL; -+ ret = -EFAULT; - } else { - DXG_TRACE("fence value:%lx", - value); -@@ -2820,7 +2820,7 @@ int dxgvmb_send_signal_sync_object(struct dxgprocess *process, - if (ret) { - DXG_ERR("Failed to read objects %p %d", - objects, object_size); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - current_pos += object_size; -@@ -2834,7 +2834,7 @@ int dxgvmb_send_signal_sync_object(struct dxgprocess *process, - if (ret) { - DXG_ERR("Failed to read contexts %p %d", - contexts, context_size); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - current_pos += context_size; -@@ -2844,7 +2844,7 @@ int dxgvmb_send_signal_sync_object(struct dxgprocess *process, - if (ret) { - DXG_ERR("Failed to read fences %p %d", - fences, fence_size); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -2898,7 +2898,7 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - ret = copy_from_user(current_pos, args->objects, object_size); - if (ret) { - DXG_ERR("failed to copy objects"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - current_pos += 
object_size; -@@ -2906,7 +2906,7 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process, - fence_size); - if (ret) { - DXG_ERR("failed to copy fences"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } else { -@@ -3037,7 +3037,7 @@ int dxgvmb_send_lock2(struct dxgprocess *process, - sizeof(args->data)); - if (ret) { - DXG_ERR("failed to copy data"); -- ret = -EINVAL; -+ ret = -EFAULT; - alloc->cpu_address_refcount--; - if (alloc->cpu_address_refcount == 0) { - dxg_unmap_iospace(alloc->cpu_address, -@@ -3119,7 +3119,7 @@ int dxgvmb_send_update_alloc_property(struct dxgprocess *process, - sizeof(u64)); - if (ret1) { - DXG_ERR("failed to copy paging fence"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - } - cleanup: -@@ -3204,14 +3204,14 @@ int dxgvmb_send_set_allocation_priority(struct dxgprocess *process, - alloc_size); - if (ret) { - DXG_ERR("failed to copy alloc handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_from_user((u8 *) allocations + alloc_size, - args->priorities, priority_size); - if (ret) { - DXG_ERR("failed to copy alloc priority"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3277,7 +3277,7 @@ int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, - alloc_size); - if (ret) { - DXG_ERR("failed to copy alloc handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3296,7 +3296,7 @@ int dxgvmb_send_get_allocation_priority(struct dxgprocess *process, - priority_size); - if (ret) { - DXG_ERR("failed to copy priorities"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -3402,7 +3402,7 @@ int dxgvmb_send_offer_allocations(struct dxgprocess *process, - } - if (ret) { - DXG_ERR("failed to copy input handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3457,7 +3457,7 @@ int dxgvmb_send_reclaim_allocations(struct dxgprocess *process, - } - if (ret) { - DXG_ERR("failed to copy input handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto 
cleanup; - } - -@@ -3469,7 +3469,7 @@ int dxgvmb_send_reclaim_allocations(struct dxgprocess *process, - &result->paging_fence_value, sizeof(u64)); - if (ret) { - DXG_ERR("failed to copy paging fence"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3480,7 +3480,7 @@ int dxgvmb_send_reclaim_allocations(struct dxgprocess *process, - args->allocation_count); - if (ret) { - DXG_ERR("failed to copy results"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - } - -@@ -3559,7 +3559,7 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - args->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy private data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -3604,7 +3604,7 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy hwqueue handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(&inargs->queue_progress_fence, -@@ -3612,7 +3612,7 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to progress fence"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(&inargs->queue_progress_fence_cpu_va, -@@ -3620,7 +3620,7 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - sizeof(inargs->queue_progress_fence_cpu_va)); - if (ret) { - DXG_ERR("failed to copy fence cpu va"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(&inargs->queue_progress_fence_gpu_va, -@@ -3628,7 +3628,7 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - sizeof(u64)); - if (ret) { - DXG_ERR("failed to copy fence gpu va"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - if (args->priv_drv_data_size) { -@@ -3637,7 +3637,7 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - args->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy private data"); -- ret = -EINVAL; -+ ret = 
-EFAULT; - } - } - -@@ -3706,7 +3706,7 @@ int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - args->private_data, args->private_data_size); - if (ret) { - DXG_ERR("Faled to copy private data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3758,7 +3758,7 @@ int dxgvmb_send_query_adapter_info(struct dxgprocess *process, - args->private_data_size); - if (ret) { - DXG_ERR("Faled to copy private data to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -3791,7 +3791,7 @@ int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, - primaries_size); - if (ret) { - DXG_ERR("failed to copy primaries handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -3801,7 +3801,7 @@ int dxgvmb_send_submit_command_hwqueue(struct dxgprocess *process, - args->priv_drv_data_size); - if (ret) { - DXG_ERR("failed to copy primaries data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -29,13 +29,6 @@ struct ioctl_desc { - u32 ioctl; - }; - --#ifdef DEBUG --static char *errorstr(int ret) --{ -- return ret < 0 ? 
"err" : ""; --} --#endif -- - void dxgsharedsyncobj_put(struct dxgsharedsyncobject *syncobj) - { - DXG_TRACE("Release syncobj: %p", syncobj); -@@ -108,7 +101,7 @@ static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("Faled to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -129,7 +122,7 @@ static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, - &args.adapter_handle, - sizeof(struct d3dkmthandle)); - if (ret) -- ret = -EINVAL; -+ ret = -EFAULT; - } - adapter = entry; - } -@@ -150,7 +143,7 @@ static int dxgkio_open_adapter_from_luid(struct dxgprocess *process, - if (ret < 0) - dxgprocess_close_adapter(process, args.adapter_handle); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -173,7 +166,7 @@ static int dxgkio_query_statistics(struct dxgprocess *process, - ret = copy_from_user(args, inargs, sizeof(*args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -199,7 +192,7 @@ static int dxgkio_query_statistics(struct dxgprocess *process, - ret = copy_to_user(inargs, args, sizeof(*args)); - if (ret) { - DXG_ERR("failed to copy args"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - } - dxgadapter_release_lock_shared(adapter); -@@ -209,7 +202,7 @@ static int dxgkio_query_statistics(struct dxgprocess *process, - if (args) - vfree(args); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -233,7 +226,7 @@ dxgkp_enum_adapters(struct dxgprocess *process, - &dxgglobal->num_adapters, sizeof(u32)); - if (ret) { - DXG_ERR("copy_to_user faled"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - goto cleanup; - } -@@ -291,7 +284,7 @@ dxgkp_enum_adapters(struct dxgprocess *process, - &dxgglobal->num_adapters, sizeof(u32)); - if (ret) { - DXG_ERR("copy_to_user failed"); -- ret = -EINVAL; 
-+ ret = -EFAULT; - } - goto cleanup; - } -@@ -300,13 +293,13 @@ dxgkp_enum_adapters(struct dxgprocess *process, - sizeof(adapter_count)); - if (ret) { - DXG_ERR("failed to copy adapter_count"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(info_out, info, sizeof(info[0]) * adapter_count); - if (ret) { - DXG_ERR("failed to copy adapter info"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -326,7 +319,7 @@ dxgkp_enum_adapters(struct dxgprocess *process, - if (adapters) - vfree(adapters); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -437,7 +430,7 @@ dxgkio_enum_adapters(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -447,7 +440,7 @@ dxgkio_enum_adapters(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy args to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - goto cleanup; - } -@@ -508,14 +501,14 @@ dxgkio_enum_adapters(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy args to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - ret = copy_to_user(args.adapters, info, - sizeof(info[0]) * args.num_adapters); - if (ret) { - DXG_ERR("failed to copy adapter info to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -536,7 +529,7 @@ dxgkio_enum_adapters(struct dxgprocess *process, void *__user inargs) - if (adapters) - vfree(adapters); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -549,7 +542,7 @@ dxgkio_enum_adapters3(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - 
DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -561,7 +554,7 @@ dxgkio_enum_adapters3(struct dxgprocess *process, void *__user inargs) - - cleanup: - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -574,7 +567,7 @@ dxgkio_close_adapter(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -584,7 +577,7 @@ dxgkio_close_adapter(struct dxgprocess *process, void *__user inargs) - - cleanup: - -- DXG_TRACE("ioctl: %s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -598,7 +591,7 @@ dxgkio_query_adapter_info(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -630,7 +623,7 @@ dxgkio_query_adapter_info(struct dxgprocess *process, void *__user inargs) - if (adapter) - kref_put(&adapter->adapter_kref, dxgadapter_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -647,7 +640,7 @@ dxgkio_create_device(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -677,7 +670,7 @@ dxgkio_create_device(struct dxgprocess *process, void *__user inargs) - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy device handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -709,7 +702,7 @@ dxgkio_create_device(struct dxgprocess *process, void *__user inargs) - if (adapter) - kref_put(&adapter->adapter_kref, dxgadapter_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ 
DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -724,7 +717,7 @@ dxgkio_destroy_device(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -756,7 +749,7 @@ dxgkio_destroy_device(struct dxgprocess *process, void *__user inargs) - - cleanup: - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -774,7 +767,7 @@ dxgkio_create_context_virtual(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -824,7 +817,7 @@ dxgkio_create_context_virtual(struct dxgprocess *process, void *__user inargs) - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy context handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - } else { - DXG_ERR("invalid host handle"); -@@ -851,7 +844,7 @@ dxgkio_create_context_virtual(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -868,7 +861,7 @@ dxgkio_destroy_context(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -920,7 +913,7 @@ dxgkio_destroy_context(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %s %d", errorstr(ret), __func__, ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -938,7 +931,7 @@ dxgkio_create_hwqueue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - 
DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1002,7 +995,7 @@ dxgkio_create_hwqueue(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -1019,7 +1012,7 @@ static int dxgkio_destroy_hwqueue(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1070,7 +1063,7 @@ static int dxgkio_destroy_hwqueue(struct dxgprocess *process, - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -1088,7 +1081,7 @@ dxgkio_create_paging_queue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1128,7 +1121,7 @@ dxgkio_create_paging_queue(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1169,7 +1162,7 @@ dxgkio_create_paging_queue(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -1186,7 +1179,7 @@ dxgkio_destroy_paging_queue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1247,7 +1240,7 @@ dxgkio_destroy_paging_queue(struct dxgprocess *process, void *__user 
inargs) - kref_put(&device->device_kref, dxgdevice_release); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -1351,7 +1344,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1373,7 +1366,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - alloc_info_size); - if (ret) { - DXG_ERR("failed to copy alloc info"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1412,7 +1405,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - sizeof(standard_alloc)); - if (ret) { - DXG_ERR("failed to copy std alloc data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - if (standard_alloc.type == -@@ -1556,7 +1549,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - if (ret) { - DXG_ERR( - "failed to copy runtime data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -1576,7 +1569,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - if (ret) { - DXG_ERR( - "failed to copy res data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -1733,7 +1726,7 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -1793,7 +1786,7 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -1823,7 +1816,7 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - handle_size); - if (ret) { - 
DXG_ERR("failed to copy alloc handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -1962,7 +1955,7 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs) - if (allocs) - vfree(allocs); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -1978,7 +1971,7 @@ dxgkio_make_resident(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2022,7 +2015,7 @@ dxgkio_make_resident(struct dxgprocess *process, void *__user inargs) - &args.paging_fence_value, sizeof(u64)); - if (ret2) { - DXG_ERR("failed to copy paging fence"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2030,7 +2023,7 @@ dxgkio_make_resident(struct dxgprocess *process, void *__user inargs) - &args.num_bytes_to_trim, sizeof(u64)); - if (ret2) { - DXG_ERR("failed to copy bytes to trim"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2041,7 +2034,7 @@ dxgkio_make_resident(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - - return ret; - } -@@ -2058,7 +2051,7 @@ dxgkio_evict(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2090,7 +2083,7 @@ dxgkio_evict(struct dxgprocess *process, void *__user inargs) - &args.num_bytes_to_trim, sizeof(u64)); - if (ret) { - DXG_ERR("failed to copy bytes to trim to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - cleanup: - -@@ -2099,7 +2092,7 @@ dxgkio_evict(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- 
DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2114,7 +2107,7 @@ dxgkio_offer_allocations(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2153,7 +2146,7 @@ dxgkio_offer_allocations(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2169,7 +2162,7 @@ dxgkio_reclaim_allocations(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2212,7 +2205,7 @@ dxgkio_reclaim_allocations(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2227,7 +2220,7 @@ dxgkio_submit_command(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2280,7 +2273,7 @@ dxgkio_submit_command(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2296,7 +2289,7 @@ dxgkio_submit_command_to_hwqueue(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2336,7 +2329,7 @@ dxgkio_submit_command_to_hwqueue(struct dxgprocess *process, 
- if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2352,7 +2345,7 @@ dxgkio_submit_signal_to_hwqueue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2376,7 +2369,7 @@ dxgkio_submit_signal_to_hwqueue(struct dxgprocess *process, void *__user inargs) - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy hwqueue handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2410,7 +2403,7 @@ dxgkio_submit_signal_to_hwqueue(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2428,7 +2421,7 @@ dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2447,7 +2440,7 @@ dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(objects, args.objects, object_size); - if (ret) { - DXG_ERR("failed to copy objects"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2460,7 +2453,7 @@ dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(fences, args.fence_values, object_size); - if (ret) { - DXG_ERR("failed to copy fence values"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2494,7 +2487,7 @@ dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ 
DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2510,7 +2503,7 @@ dxgkio_map_gpu_va(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2542,7 +2535,7 @@ dxgkio_map_gpu_va(struct dxgprocess *process, void *__user inargs) - &args.paging_fence_value, sizeof(u64)); - if (ret2) { - DXG_ERR("failed to copy paging fence to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2550,7 +2543,7 @@ dxgkio_map_gpu_va(struct dxgprocess *process, void *__user inargs) - sizeof(args.virtual_address)); - if (ret2) { - DXG_ERR("failed to copy va to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2561,7 +2554,7 @@ dxgkio_map_gpu_va(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2577,7 +2570,7 @@ dxgkio_reserve_gpu_va(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2614,7 +2607,7 @@ dxgkio_reserve_gpu_va(struct dxgprocess *process, void *__user inargs) - sizeof(args.virtual_address)); - if (ret) { - DXG_ERR("failed to copy VA to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -2624,7 +2617,7 @@ dxgkio_reserve_gpu_va(struct dxgprocess *process, void *__user inargs) - kref_put(&adapter->adapter_kref, dxgadapter_release); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2638,7 +2631,7 @@ dxgkio_free_gpu_va(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = 
-EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2680,7 +2673,7 @@ dxgkio_update_gpu_va(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2705,7 +2698,7 @@ dxgkio_update_gpu_va(struct dxgprocess *process, void *__user inargs) - sizeof(args.fence_value)); - if (ret) { - DXG_ERR("failed to copy fence value to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -2734,7 +2727,7 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2808,7 +2801,7 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy output args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2842,7 +2835,7 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2856,7 +2849,7 @@ dxgkio_destroy_sync_object(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2885,7 +2878,7 @@ dxgkio_destroy_sync_object(struct dxgprocess *process, void *__user inargs) - - cleanup: - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -2906,7 +2899,7 @@ dxgkio_open_sync_object_nt(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy 
input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -2995,7 +2988,7 @@ dxgkio_open_sync_object_nt(struct dxgprocess *process, void *__user inargs) - if (ret == 0) - goto success; - DXG_ERR("failed to copy output args"); -- ret = -EINVAL; -+ ret = -EFAULT; - - cleanup: - -@@ -3020,7 +3013,7 @@ dxgkio_open_sync_object_nt(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3041,7 +3034,7 @@ dxgkio_signal_sync_object(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3129,7 +3122,7 @@ dxgkio_signal_sync_object(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3144,7 +3137,7 @@ dxgkio_signal_sync_object_cpu(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - if (args.object_count == 0 || -@@ -3181,7 +3174,7 @@ dxgkio_signal_sync_object_cpu(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3199,7 +3192,7 @@ dxgkio_signal_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3240,7 +3233,7 @@ dxgkio_signal_sync_object_gpu(struct dxgprocess *process, 
void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3262,7 +3255,7 @@ dxgkio_signal_sync_object_gpu2(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3287,7 +3280,7 @@ dxgkio_signal_sync_object_gpu2(struct dxgprocess *process, void *__user inargs) - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy context handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3365,7 +3358,7 @@ dxgkio_signal_sync_object_gpu2(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3380,7 +3373,7 @@ dxgkio_wait_sync_object(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3418,7 +3411,7 @@ dxgkio_wait_sync_object(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3439,7 +3432,7 @@ dxgkio_wait_sync_object_cpu(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3540,7 +3533,7 @@ dxgkio_wait_sync_object_cpu(struct dxgprocess *process, void *__user inargs) - kfree(async_host_event); - } - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ 
-3563,7 +3556,7 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3583,7 +3576,7 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(objects, args.objects, object_size); - if (ret) { - DXG_ERR("failed to copy objects"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3637,7 +3630,7 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - object_size); - if (ret) { - DXG_ERR("failed to copy fences"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } else { -@@ -3673,7 +3666,7 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs) - if (fences && fences != &args.fence_value) - vfree(fences); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3690,7 +3683,7 @@ dxgkio_lock2(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3712,7 +3705,7 @@ dxgkio_lock2(struct dxgprocess *process, void *__user inargs) - alloc->cpu_address_refcount++; - } else { - DXG_ERR("Failed to copy cpu address"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - } - } -@@ -3749,7 +3742,7 @@ dxgkio_lock2(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - - success: -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3766,7 +3759,7 @@ dxgkio_unlock2(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ 
-3829,7 +3822,7 @@ dxgkio_unlock2(struct dxgprocess *process, void *__user inargs) - kref_put(&device->device_kref, dxgdevice_release); - - success: -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3844,7 +3837,7 @@ dxgkio_update_alloc_property(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3872,7 +3865,7 @@ dxgkio_update_alloc_property(struct dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3887,7 +3880,7 @@ dxgkio_mark_device_as_error(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - device = dxgprocess_device_by_handle(process, args.device); -@@ -3908,7 +3901,7 @@ dxgkio_mark_device_as_error(struct dxgprocess *process, void *__user inargs) - dxgadapter_release_lock_shared(adapter); - if (device) - kref_put(&device->device_kref, dxgdevice_release); -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3923,7 +3916,7 @@ dxgkio_query_alloc_residency(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -3949,7 +3942,7 @@ dxgkio_query_alloc_residency(struct dxgprocess *process, void *__user inargs) - dxgadapter_release_lock_shared(adapter); - if (device) - kref_put(&device->device_kref, dxgdevice_release); -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3964,7 
+3957,7 @@ dxgkio_set_allocation_priority(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - device = dxgprocess_device_by_handle(process, args.device); -@@ -3984,7 +3977,7 @@ dxgkio_set_allocation_priority(struct dxgprocess *process, void *__user inargs) - dxgadapter_release_lock_shared(adapter); - if (device) - kref_put(&device->device_kref, dxgdevice_release); -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -3999,7 +3992,7 @@ dxgkio_get_allocation_priority(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - device = dxgprocess_device_by_handle(process, args.device); -@@ -4019,7 +4012,7 @@ dxgkio_get_allocation_priority(struct dxgprocess *process, void *__user inargs) - dxgadapter_release_lock_shared(adapter); - if (device) - kref_put(&device->device_kref, dxgdevice_release); -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4069,14 +4062,14 @@ dxgkio_set_context_scheduling_priority(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - - ret = set_context_scheduling_priority(process, args.context, - args.priority, false); - cleanup: -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4111,7 +4104,7 @@ get_context_scheduling_priority(struct dxgprocess *process, - ret = copy_to_user(priority, &pri, sizeof(pri)); - if (ret) { - DXG_ERR("failed to copy priority to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -4134,14 +4127,14 @@ 
dxgkio_get_context_scheduling_priority(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - - ret = get_context_scheduling_priority(process, args.context, - &input->priority, false); - cleanup: -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4155,14 +4148,14 @@ dxgkio_set_context_process_scheduling_priority(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - - ret = set_context_scheduling_priority(process, args.context, - args.priority, true); - cleanup: -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4176,7 +4169,7 @@ dxgkio_get_context_process_scheduling_priority(struct dxgprocess *process, - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4184,7 +4177,7 @@ dxgkio_get_context_process_scheduling_priority(struct dxgprocess *process, - &((struct d3dkmt_getcontextinprocessschedulingpriority *) - inargs)->priority, true); - cleanup: -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4199,7 +4192,7 @@ dxgkio_change_vidmem_reservation(struct dxgprocess *process, void *__user inargs - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4232,7 +4225,7 @@ dxgkio_change_vidmem_reservation(struct dxgprocess *process, void *__user inargs - if (adapter) - kref_put(&adapter->adapter_kref, dxgadapter_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ 
-4247,7 +4240,7 @@ dxgkio_query_clock_calibration(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4272,7 +4265,7 @@ dxgkio_query_clock_calibration(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy output args"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -4295,7 +4288,7 @@ dxgkio_flush_heap_transitions(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4319,7 +4312,7 @@ dxgkio_flush_heap_transitions(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy output args"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -4341,7 +4334,7 @@ dxgkio_escape(struct dxgprocess *process, void *__user inargs) - - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4367,7 +4360,7 @@ dxgkio_escape(struct dxgprocess *process, void *__user inargs) - dxgadapter_release_lock_shared(adapter); - if (adapter) - kref_put(&adapter->adapter_kref, dxgadapter_release); -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4382,7 +4375,7 @@ dxgkio_query_vidmem_info(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4432,7 +4425,7 @@ dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - 
DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4458,7 +4451,7 @@ dxgkio_get_device_state(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy args to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - goto cleanup; - } -@@ -4590,7 +4583,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4610,7 +4603,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(handles, args.objects, handle_size); - if (ret) { - DXG_ERR("failed to copy object handles"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4708,7 +4701,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(args.shared_handle, &tmp, sizeof(u64)); - if (ret) { - DXG_ERR("failed to copy shared handle"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -4726,7 +4719,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - if (resource) - kref_put(&resource->resource_kref, dxgresource_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4742,7 +4735,7 @@ dxgkio_query_resource_info_nt(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -4795,7 +4788,7 @@ dxgkio_query_resource_info_nt(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy output args"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -4807,7 +4800,7 @@ dxgkio_query_resource_info_nt(struct 
dxgprocess *process, void *__user inargs) - if (device) - kref_put(&device->device_kref, dxgdevice_release); - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -4859,7 +4852,7 @@ assign_resource_handles(struct dxgprocess *process, - sizeof(open_alloc_info)); - if (ret) { - DXG_ERR("failed to copy alloc info"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -5009,7 +5002,7 @@ open_resource(struct dxgprocess *process, - shared_resource->runtime_private_data_size); - if (ret) { - DXG_ERR("failed to copy runtime data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -5020,7 +5013,7 @@ open_resource(struct dxgprocess *process, - shared_resource->resource_private_data_size); - if (ret) { - DXG_ERR("failed to copy resource data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -5031,7 +5024,7 @@ open_resource(struct dxgprocess *process, - shared_resource->alloc_private_data_size); - if (ret) { - DXG_ERR("failed to copy alloc data"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - } -@@ -5046,7 +5039,7 @@ open_resource(struct dxgprocess *process, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy resource handle to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -5054,7 +5047,7 @@ open_resource(struct dxgprocess *process, - &args->total_priv_drv_data_size, sizeof(u32)); - if (ret) { - DXG_ERR("failed to copy total driver data size"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: -@@ -5102,7 +5095,7 @@ dxgkio_open_resource_nt(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -5112,7 +5105,7 @@ dxgkio_open_resource_nt(struct dxgprocess *process, void *__user inargs) - - cleanup: - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ 
DXG_TRACE_IOCTL_END(ret); - return ret; - } - -@@ -5125,7 +5118,7 @@ dxgkio_share_object_with_host(struct dxgprocess *process, void *__user inargs) - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy input args"); -- ret = -EINVAL; -+ ret = -EFAULT; - goto cleanup; - } - -@@ -5138,12 +5131,12 @@ dxgkio_share_object_with_host(struct dxgprocess *process, void *__user inargs) - ret = copy_to_user(inargs, &args, sizeof(args)); - if (ret) { - DXG_ERR("failed to copy data to user"); -- ret = -EINVAL; -+ ret = -EFAULT; - } - - cleanup: - -- DXG_TRACE("ioctl:%s %d", errorstr(ret), ret); -+ DXG_TRACE_IOCTL_END(ret); - return ret; - } - --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1702-drivers-hv-dxgkrnl-Fix-synchronization-locks.patch b/patch/kernel/archive/wsl2-arm64-6.6/1702-drivers-hv-dxgkrnl-Fix-synchronization-locks.patch deleted file mode 100644 index d2f13ea1b3f6..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1702-drivers-hv-dxgkrnl-Fix-synchronization-locks.patch +++ /dev/null @@ -1,391 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Mon, 13 Jun 2022 14:18:10 -0700 -Subject: drivers: hv: dxgkrnl: Fix synchronization locks - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/dxgadapter.c | 19 ++- - drivers/hv/dxgkrnl/dxgkrnl.h | 8 +- - drivers/hv/dxgkrnl/dxgmodule.c | 3 +- - drivers/hv/dxgkrnl/dxgprocess.c | 11 +- - drivers/hv/dxgkrnl/dxgvmbus.c | 85 +++++++--- - drivers/hv/dxgkrnl/ioctl.c | 24 ++- - drivers/hv/dxgkrnl/misc.h | 1 + - 7 files changed, 101 insertions(+), 50 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -136,7 +136,7 @@ void dxgadapter_release(struct kref *refcount) - struct dxgadapter 
*adapter; - - adapter = container_of(refcount, struct dxgadapter, adapter_kref); -- DXG_TRACE("%p", adapter); -+ DXG_TRACE("Destroying adapter: %px", adapter); - kfree(adapter); - } - -@@ -270,6 +270,8 @@ struct dxgdevice *dxgdevice_create(struct dxgadapter *adapter, - if (ret < 0) { - kref_put(&device->device_kref, dxgdevice_release); - device = NULL; -+ } else { -+ DXG_TRACE("dxgdevice created: %px", device); - } - } - return device; -@@ -413,11 +415,8 @@ void dxgdevice_destroy(struct dxgdevice *device) - - cleanup: - -- if (device->adapter) { -+ if (device->adapter) - dxgprocess_adapter_remove_device(device); -- kref_put(&device->adapter->adapter_kref, dxgadapter_release); -- device->adapter = NULL; -- } - - up_write(&device->device_lock); - -@@ -721,6 +720,8 @@ void dxgdevice_release(struct kref *refcount) - struct dxgdevice *device; - - device = container_of(refcount, struct dxgdevice, device_kref); -+ DXG_TRACE("Destroying device: %px", device); -+ kref_put(&device->adapter->adapter_kref, dxgadapter_release); - kfree(device); - } - -@@ -999,6 +1000,9 @@ void dxgpagingqueue_destroy(struct dxgpagingqueue *pqueue) - kfree(pqueue); - } - -+/* -+ * Process_adapter_mutex is held. 
-+ */ - struct dxgprocess_adapter *dxgprocess_adapter_create(struct dxgprocess *process, - struct dxgadapter *adapter) - { -@@ -1108,7 +1112,7 @@ int dxgprocess_adapter_add_device(struct dxgprocess *process, - - void dxgprocess_adapter_remove_device(struct dxgdevice *device) - { -- DXG_TRACE("Removing device: %p", device); -+ DXG_TRACE("Removing device: %px", device); - mutex_lock(&device->adapter_info->device_list_mutex); - if (device->device_list_entry.next) { - list_del(&device->device_list_entry); -@@ -1147,8 +1151,7 @@ void dxgsharedsyncobj_release(struct kref *refcount) - if (syncobj->adapter) { - dxgadapter_remove_shared_syncobj(syncobj->adapter, - syncobj); -- kref_put(&syncobj->adapter->adapter_kref, -- dxgadapter_release); -+ kref_put(&syncobj->adapter->adapter_kref, dxgadapter_release); - } - kfree(syncobj); - } -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -404,7 +404,10 @@ struct dxgprocess { - /* Handle of the corresponding objec on the host */ - struct d3dkmthandle host_handle; - -- /* List of opened adapters (dxgprocess_adapter) */ -+ /* -+ * List of opened adapters (dxgprocess_adapter). -+ * Protected by process_adapter_mutex. -+ */ - struct list_head process_adapter_list_head; - }; - -@@ -451,6 +454,8 @@ enum dxgadapter_state { - struct dxgadapter { - struct rw_semaphore core_lock; - struct kref adapter_kref; -+ /* Protects creation and destruction of dxgdevice objects */ -+ struct mutex device_creation_lock; - /* Entry in the list of adapters in dxgglobal */ - struct list_head adapter_list_entry; - /* The list of dxgprocess_adapter entries */ -@@ -997,6 +1002,7 @@ void dxgk_validate_ioctls(void); - - #define DXG_TRACE(fmt, ...) do { \ - trace_printk(dev_fmt(fmt) "\n", ##__VA_ARGS__); \ -+ dev_dbg(DXGDEV, "%s: " fmt, __func__, ##__VA_ARGS__); \ - } while (0) - - #define DXG_ERR(fmt, ...) 
do { \ -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -272,6 +272,7 @@ int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid, - adapter->host_vgpu_luid = host_vgpu_luid; - kref_init(&adapter->adapter_kref); - init_rwsem(&adapter->core_lock); -+ mutex_init(&adapter->device_creation_lock); - - INIT_LIST_HEAD(&adapter->adapter_process_list_head); - INIT_LIST_HEAD(&adapter->shared_resource_list_head); -@@ -961,4 +962,4 @@ module_exit(dxg_drv_exit); - - MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Microsoft Dxgkrnl virtual compute device Driver"); --MODULE_VERSION("2.0.0"); -+MODULE_VERSION("2.0.1"); -diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgprocess.c -+++ b/drivers/hv/dxgkrnl/dxgprocess.c -@@ -214,14 +214,15 @@ int dxgprocess_close_adapter(struct dxgprocess *process, - hmgrtable_unlock(&process->local_handle_table, DXGLOCK_EXCL); - - if (adapter) { -+ mutex_lock(&adapter->device_creation_lock); -+ dxgglobal_acquire_process_adapter_lock(); - adapter_info = dxgprocess_get_adapter_info(process, adapter); -- if (adapter_info) { -- dxgglobal_acquire_process_adapter_lock(); -+ if (adapter_info) - dxgprocess_adapter_release(adapter_info); -- dxgglobal_release_process_adapter_lock(); -- } else { -+ else - ret = -EINVAL; -- } -+ dxgglobal_release_process_adapter_lock(); -+ mutex_unlock(&adapter->device_creation_lock); - } else { - DXG_ERR("Adapter not found %x", handle.v); - ret = -EINVAL; -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1573,8 +1573,27 @@ process_allocation_handles(struct dxgprocess *process, - struct dxgresource *resource) - { - int ret = 0; -- int i; 
-+ int i = 0; -+ int k; -+ struct dxgkvmb_command_allocinfo_return *host_alloc; - -+ /* -+ * Assign handle to the internal objects, so VM bus messages will be -+ * sent to the host to free them during object destruction. -+ */ -+ if (args->flags.create_resource) -+ resource->handle = res->resource; -+ for (i = 0; i < args->alloc_count; i++) { -+ host_alloc = &res->allocation_info[i]; -+ dxgalloc[i]->alloc_handle = host_alloc->allocation; -+ } -+ -+ /* -+ * Assign handle to the handle table. -+ * In case of a failure all handles should be freed. -+ * When the function returns, the objects could be destroyed by -+ * handle immediately. -+ */ - hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); - if (args->flags.create_resource) { - ret = hmgrtable_assign_handle(&process->handle_table, resource, -@@ -1583,14 +1602,12 @@ process_allocation_handles(struct dxgprocess *process, - if (ret < 0) { - DXG_ERR("failed to assign resource handle %x", - res->resource.v); -+ goto cleanup; - } else { -- resource->handle = res->resource; - resource->handle_valid = 1; - } - } - for (i = 0; i < args->alloc_count; i++) { -- struct dxgkvmb_command_allocinfo_return *host_alloc; -- - host_alloc = &res->allocation_info[i]; - ret = hmgrtable_assign_handle(&process->handle_table, - dxgalloc[i], -@@ -1602,9 +1619,26 @@ process_allocation_handles(struct dxgprocess *process, - args->alloc_count, i); - break; - } -- dxgalloc[i]->alloc_handle = host_alloc->allocation; - dxgalloc[i]->handle_valid = 1; - } -+ if (ret < 0) { -+ if (args->flags.create_resource) { -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGRESOURCE, -+ res->resource); -+ resource->handle_valid = 0; -+ } -+ for (k = 0; k < i; k++) { -+ host_alloc = &res->allocation_info[i]; -+ hmgrtable_free_handle(&process->handle_table, -+ HMGRENTRY_TYPE_DXGALLOCATION, -+ host_alloc->allocation); -+ dxgalloc[i]->handle_valid = 0; -+ } -+ } -+ -+cleanup: -+ - hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); - - if 
(ret) -@@ -1705,18 +1739,17 @@ create_local_allocations(struct dxgprocess *process, - } - } - -- ret = process_allocation_handles(process, device, args, result, -- dxgalloc, resource); -- if (ret < 0) -- goto cleanup; -- - ret = copy_to_user(&input_args->global_share, &args->global_share, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy global share"); - ret = -EFAULT; -+ goto cleanup; - } - -+ ret = process_allocation_handles(process, device, args, result, -+ dxgalloc, resource); -+ - cleanup: - - if (ret < 0) { -@@ -3576,22 +3609,6 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - goto cleanup; - } - -- ret = hmgrtable_assign_handle_safe(&process->handle_table, hwqueue, -- HMGRENTRY_TYPE_DXGHWQUEUE, -- command->hwqueue); -- if (ret < 0) -- goto cleanup; -- -- ret = hmgrtable_assign_handle_safe(&process->handle_table, -- NULL, -- HMGRENTRY_TYPE_MONITOREDFENCE, -- command->hwqueue_progress_fence); -- if (ret < 0) -- goto cleanup; -- -- hwqueue->handle = command->hwqueue; -- hwqueue->progress_fence_sync_object = command->hwqueue_progress_fence; -- - hwqueue->progress_fence_mapped_address = - dxg_map_iospace((u64)command->hwqueue_progress_fence_cpuva, - PAGE_SIZE, PROT_READ | PROT_WRITE, true); -@@ -3641,6 +3658,22 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process, - } - } - -+ ret = hmgrtable_assign_handle_safe(&process->handle_table, -+ NULL, -+ HMGRENTRY_TYPE_MONITOREDFENCE, -+ command->hwqueue_progress_fence); -+ if (ret < 0) -+ goto cleanup; -+ -+ hwqueue->progress_fence_sync_object = command->hwqueue_progress_fence; -+ hwqueue->handle = command->hwqueue; -+ -+ ret = hmgrtable_assign_handle_safe(&process->handle_table, hwqueue, -+ HMGRENTRY_TYPE_DXGHWQUEUE, -+ command->hwqueue); -+ if (ret < 0) -+ hwqueue->handle.v = 0; -+ - cleanup: - if (ret < 0) { - DXG_ERR("failed %x", ret); -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- 
a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -636,6 +636,7 @@ dxgkio_create_device(struct dxgprocess *process, void *__user inargs) - struct dxgdevice *device = NULL; - struct d3dkmthandle host_device_handle = {}; - bool adapter_locked = false; -+ bool device_creation_locked = false; - - ret = copy_from_user(&args, inargs, sizeof(args)); - if (ret) { -@@ -651,6 +652,9 @@ dxgkio_create_device(struct dxgprocess *process, void *__user inargs) - goto cleanup; - } - -+ mutex_lock(&adapter->device_creation_lock); -+ device_creation_locked = true; -+ - device = dxgdevice_create(adapter, process); - if (device == NULL) { - ret = -ENOMEM; -@@ -699,6 +703,9 @@ dxgkio_create_device(struct dxgprocess *process, void *__user inargs) - if (adapter_locked) - dxgadapter_release_lock_shared(adapter); - -+ if (device_creation_locked) -+ mutex_unlock(&adapter->device_creation_lock); -+ - if (adapter) - kref_put(&adapter->adapter_kref, dxgadapter_release); - -@@ -803,22 +810,21 @@ dxgkio_create_context_virtual(struct dxgprocess *process, void *__user inargs) - host_context_handle = dxgvmb_send_create_context(adapter, - process, &args); - if (host_context_handle.v) { -- hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -- ret = hmgrtable_assign_handle(&process->handle_table, context, -- HMGRENTRY_TYPE_DXGCONTEXT, -- host_context_handle); -- if (ret >= 0) -- context->handle = host_context_handle; -- hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); -- if (ret < 0) -- goto cleanup; - ret = copy_to_user(&((struct d3dkmt_createcontextvirtual *) - inargs)->context, &host_context_handle, - sizeof(struct d3dkmthandle)); - if (ret) { - DXG_ERR("failed to copy context handle"); - ret = -EFAULT; -+ goto cleanup; - } -+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL); -+ ret = hmgrtable_assign_handle(&process->handle_table, context, -+ HMGRENTRY_TYPE_DXGCONTEXT, -+ host_context_handle); -+ if (ret >= 0) -+ context->handle = host_context_handle; -+ 
hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL); - } else { - DXG_ERR("invalid host handle"); - ret = -EINVAL; -diff --git a/drivers/hv/dxgkrnl/misc.h b/drivers/hv/dxgkrnl/misc.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/misc.h -+++ b/drivers/hv/dxgkrnl/misc.h -@@ -38,6 +38,7 @@ extern const struct d3dkmthandle zerohandle; - * core_lock (dxgadapter lock) - * device_lock (dxgdevice lock) - * process_adapter_mutex -+ * device_creation_lock in dxgadapter - * adapter_list_lock - * device_mutex (dxgglobal mutex) - */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1703-drivers-hv-dxgkrnl-Close-shared-file-objects-in-case-of-a-failure.patch b/patch/kernel/archive/wsl2-arm64-6.6/1703-drivers-hv-dxgkrnl-Close-shared-file-objects-in-case-of-a-failure.patch deleted file mode 100644 index 3d0f8dbf3e00..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1703-drivers-hv-dxgkrnl-Close-shared-file-objects-in-case-of-a-failure.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Tue, 28 Jun 2022 17:26:11 -0700 -Subject: drivers: hv: dxgkrnl: Close shared file objects in case of a failure - -Signed-off-by: Iouri Tarassov -[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/ioctl.c | 14 +++++++--- - 1 file changed, 10 insertions(+), 4 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -4536,7 +4536,7 @@ enum dxg_sharedobject_type { - }; - - static int get_object_fd(enum dxg_sharedobject_type type, -- void *object, int *fdout) -+ void *object, int *fdout, struct file **filp) - { - struct file *file; - int fd; -@@ -4565,8 +4565,8 @@ static int get_object_fd(enum dxg_sharedobject_type type, - return -ENOTRECOVERABLE; - } - -- fd_install(fd, file); - *fdout = fd; -+ *filp = 
file; - return 0; - } - -@@ -4581,6 +4581,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - struct dxgsharedresource *shared_resource = NULL; - struct d3dkmthandle *handles = NULL; - int object_fd = -1; -+ struct file *filp = NULL; - void *obj = NULL; - u32 handle_size; - int ret; -@@ -4660,7 +4661,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - switch (object_type) { - case HMGRENTRY_TYPE_DXGSYNCOBJECT: - ret = get_object_fd(DXG_SHARED_SYNCOBJECT, shared_syncobj, -- &object_fd); -+ &object_fd, &filp); - if (ret < 0) { - DXG_ERR("get_object_fd failed for sync object"); - goto cleanup; -@@ -4675,7 +4676,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - break; - case HMGRENTRY_TYPE_DXGRESOURCE: - ret = get_object_fd(DXG_SHARED_RESOURCE, shared_resource, -- &object_fd); -+ &object_fd, &filp); - if (ret < 0) { - DXG_ERR("get_object_fd failed for resource"); - goto cleanup; -@@ -4708,10 +4709,15 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs) - if (ret) { - DXG_ERR("failed to copy shared handle"); - ret = -EFAULT; -+ goto cleanup; - } - -+ fd_install(object_fd, filp); -+ - cleanup: - if (ret < 0) { -+ if (filp) -+ fput(filp); - if (object_fd >= 0) - put_unused_fd(object_fd); - } --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1704-drivers-hv-dxgkrnl-Added-missed-NULL-check-for-resource-object.patch b/patch/kernel/archive/wsl2-arm64-6.6/1704-drivers-hv-dxgkrnl-Added-missed-NULL-check-for-resource-object.patch deleted file mode 100644 index 0e0e86806c35..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1704-drivers-hv-dxgkrnl-Added-missed-NULL-check-for-resource-object.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Iouri Tarassov -Date: Wed, 29 Jun 2022 10:04:23 -0700 -Subject: drivers: hv: dxgkrnl: Added missed NULL check for resource object - -Signed-off-by: Iouri Tarassov 
-[kms: Forward port to v6.1] -Signed-off-by: Kelsey Steele ---- - drivers/hv/dxgkrnl/ioctl.c | 10 ++++++---- - 1 file changed, 6 insertions(+), 4 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/ioctl.c -+++ b/drivers/hv/dxgkrnl/ioctl.c -@@ -1589,7 +1589,8 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - &process->handle_table, - HMGRENTRY_TYPE_DXGRESOURCE, - args.resource); -- kref_get(&resource->resource_kref); -+ if (resource != NULL) -+ kref_get(&resource->resource_kref); - dxgprocess_ht_lock_shared_up(process); - - if (resource == NULL || resource->device != device) { -@@ -1693,10 +1694,8 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - &standard_alloc); - cleanup: - -- if (resource_mutex_acquired) { -+ if (resource_mutex_acquired) - mutex_unlock(&resource->resource_mutex); -- kref_put(&resource->resource_kref, dxgresource_release); -- } - if (ret < 0) { - if (dxgalloc) { - for (i = 0; i < args.alloc_count; i++) { -@@ -1727,6 +1726,9 @@ dxgkio_create_allocation(struct dxgprocess *process, void *__user inargs) - if (adapter) - dxgadapter_release_lock_shared(adapter); - -+ if (resource && !args.flags.create_resource) -+ kref_put(&resource->resource_kref, dxgresource_release); -+ - if (device) { - dxgdevice_release_lock_shared(device); - kref_put(&device->device_kref, dxgdevice_release); --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1705-drivers-hv-dxgkrnl-Fixed-dxgkrnl-to-build-for-the-6.1-kernel.patch b/patch/kernel/archive/wsl2-arm64-6.6/1705-drivers-hv-dxgkrnl-Fixed-dxgkrnl-to-build-for-the-6.1-kernel.patch deleted file mode 100644 index da923d76f19a..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1705-drivers-hv-dxgkrnl-Fixed-dxgkrnl-to-build-for-the-6.1-kernel.patch +++ /dev/null @@ -1,84 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 
-From: Iouri Tarassov -Date: Thu, 26 Jan 2023 10:49:41 -0800 -Subject: drivers: hv: dxgkrnl: Fixed dxgkrnl to build for the 6.1 kernel - -Definition for GPADL was changed from u32 to struct vmbus_gpadl. - -Signed-off-by: Iouri Tarassov ---- - drivers/hv/dxgkrnl/dxgadapter.c | 8 -------- - drivers/hv/dxgkrnl/dxgkrnl.h | 4 ---- - drivers/hv/dxgkrnl/dxgvmbus.c | 8 -------- - 3 files changed, 20 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgadapter.c -+++ b/drivers/hv/dxgkrnl/dxgadapter.c -@@ -927,19 +927,11 @@ void dxgallocation_destroy(struct dxgallocation *alloc) - alloc->owner.device, - &args, &alloc->alloc_handle); - } --#ifdef _MAIN_KERNEL_ - if (alloc->gpadl.gpadl_handle) { - DXG_TRACE("Teardown gpadl %d", alloc->gpadl.gpadl_handle); - vmbus_teardown_gpadl(dxgglobal_get_vmbus(), &alloc->gpadl); - alloc->gpadl.gpadl_handle = 0; - } --#else -- if (alloc->gpadl) { -- DXG_TRACE("Teardown gpadl %d", alloc->gpadl); -- vmbus_teardown_gpadl(dxgglobal_get_vmbus(), alloc->gpadl); -- alloc->gpadl = 0; -- } --#endif - if (alloc->priv_drv_data) - vfree(alloc->priv_drv_data); - if (alloc->cpu_address_mapped) -diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgkrnl.h -+++ b/drivers/hv/dxgkrnl/dxgkrnl.h -@@ -728,11 +728,7 @@ struct dxgallocation { - u32 cached:1; - u32 handle_valid:1; - /* GPADL address list for existing sysmem allocations */ --#ifdef _MAIN_KERNEL_ - struct vmbus_gpadl gpadl; --#else -- u32 gpadl; --#endif - /* Number of pages in the 'pages' array */ - u32 num_pages; - /* -diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgvmbus.c -+++ b/drivers/hv/dxgkrnl/dxgvmbus.c -@@ -1493,22 +1493,14 @@ int create_existing_sysmem(struct dxgdevice *device, - ret = -ENOMEM; - 
goto cleanup; - } --#ifdef _MAIN_KERNEL_ - DXG_TRACE("New gpadl %d", dxgalloc->gpadl.gpadl_handle); --#else -- DXG_TRACE("New gpadl %d", dxgalloc->gpadl); --#endif - - command_vgpu_to_host_init2(&set_store_command->hdr, - DXGK_VMBCOMMAND_SETEXISTINGSYSMEMSTORE, - device->process->host_handle); - set_store_command->device = device->handle; - set_store_command->allocation = host_alloc->allocation; --#ifdef _MAIN_KERNEL_ - set_store_command->gpadl = dxgalloc->gpadl.gpadl_handle; --#else -- set_store_command->gpadl = dxgalloc->gpadl; --#endif - ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, - msg.size); - if (ret < 0) --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1706-virtio-pmem-Support-PCI-BAR-relative-addresses.patch b/patch/kernel/archive/wsl2-arm64-6.6/1706-virtio-pmem-Support-PCI-BAR-relative-addresses.patch deleted file mode 100644 index 5989721b7e90..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1706-virtio-pmem-Support-PCI-BAR-relative-addresses.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Taylor Stark -Date: Thu, 15 Jul 2021 15:35:05 -0700 -Subject: virtio-pmem: Support PCI BAR-relative addresses - -Update virtio-pmem to allow for the pmem region to be specified in either -guest absolute terms or as a PCI BAR-relative address. This is required -to support virtio-pmem in Hyper-V, since Hyper-V only allows PCI devices -to operate on PCI memory ranges defined via BARs. - -Virtio-pmem will check for a shared memory window and use that if found, -else it will fallback to using the guest absolute addresses in -virtio_pmem_config. This was chosen over defining a new feature bit, -since it's similar to how virtio-fs is configured. 
- -Signed-off-by: Taylor Stark - -Link: https://lore.kernel.org/r/20210715223505.GA29329@linuxonhyperv3.guj3yctzbm1etfxqx2vob5hsef.xx.internal.cloudapp.net -Signed-off-by: Tyler Hicks ---- - drivers/nvdimm/virtio_pmem.c | 21 ++++++++-- - drivers/nvdimm/virtio_pmem.h | 3 ++ - 2 files changed, 20 insertions(+), 4 deletions(-) - -diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c -index 111111111111..222222222222 100644 ---- a/drivers/nvdimm/virtio_pmem.c -+++ b/drivers/nvdimm/virtio_pmem.c -@@ -36,6 +36,8 @@ static int virtio_pmem_probe(struct virtio_device *vdev) - struct virtio_pmem *vpmem; - struct resource res; - int err = 0; -+ bool have_shm_region; -+ struct virtio_shm_region pmem_region; - - if (!vdev->config->get) { - dev_err(&vdev->dev, "%s failure: config access disabled\n", -@@ -57,10 +59,21 @@ static int virtio_pmem_probe(struct virtio_device *vdev) - goto out_err; - } - -- virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, -- start, &vpmem->start); -- virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, -- size, &vpmem->size); -+ /* Retrieve the pmem device's address and size. It may have been supplied -+ * as a PCI BAR-relative shared memory region, or as a guest absolute address. 
-+ */ -+ have_shm_region = virtio_get_shm_region(vpmem->vdev, &pmem_region, -+ VIRTIO_PMEM_SHMCAP_ID_PMEM_REGION); -+ -+ if (have_shm_region) { -+ vpmem->start = pmem_region.addr; -+ vpmem->size = pmem_region.len; -+ } else { -+ virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, -+ start, &vpmem->start); -+ virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, -+ size, &vpmem->size); -+ } - - res.start = vpmem->start; - res.end = vpmem->start + vpmem->size - 1; -diff --git a/drivers/nvdimm/virtio_pmem.h b/drivers/nvdimm/virtio_pmem.h -index 111111111111..222222222222 100644 ---- a/drivers/nvdimm/virtio_pmem.h -+++ b/drivers/nvdimm/virtio_pmem.h -@@ -50,6 +50,9 @@ struct virtio_pmem { - __u64 size; - }; - -+/* For the id field in virtio_pci_shm_cap */ -+#define VIRTIO_PMEM_SHMCAP_ID_PMEM_REGION 0 -+ - void virtio_pmem_host_ack(struct virtqueue *vq); - int async_pmem_flush(struct nd_region *nd_region, struct bio *bio); - #endif --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1707-virtio-pmem-Set-DRIVER_OK-status-prior-to-creating-pmem-region.patch b/patch/kernel/archive/wsl2-arm64-6.6/1707-virtio-pmem-Set-DRIVER_OK-status-prior-to-creating-pmem-region.patch deleted file mode 100644 index 3a7e2859f998..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1707-virtio-pmem-Set-DRIVER_OK-status-prior-to-creating-pmem-region.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Taylor Stark -Date: Thu, 15 Jul 2021 15:36:38 -0700 -Subject: virtio-pmem: Set DRIVER_OK status prior to creating pmem region - -Update virtio-pmem to call virtio_device_ready prior to creating the pmem -region. Otherwise, the guest may try to access the pmem region prior to -the DRIVER_OK status being set. - -In the case of Hyper-V, the backing pmem file isn't mapped to the guest -until the DRIVER_OK status is set. Therefore, attempts to access the pmem -region can cause the guest to crash. 
Hyper-V could map the file earlier, -for example at VM creation, but we didn't want to pay the mapping cost if -the device is never used. Additionally, it felt weird to allow the guest -to access the region prior to the device fully coming online. - -Signed-off-by: Taylor Stark -Reviewed-by: Pankaj Gupta - -Link: https://lore.kernel.org/r/20210715223638.GA29649@linuxonhyperv3.guj3yctzbm1etfxqx2vob5hsef.xx.internal.cloudapp.net -Signed-off-by: Tyler Hicks ---- - drivers/nvdimm/virtio_pmem.c | 6 ++++++ - 1 file changed, 6 insertions(+) - -diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c -index 111111111111..222222222222 100644 ---- a/drivers/nvdimm/virtio_pmem.c -+++ b/drivers/nvdimm/virtio_pmem.c -@@ -90,6 +90,11 @@ static int virtio_pmem_probe(struct virtio_device *vdev) - - dev_set_drvdata(&vdev->dev, vpmem->nvdimm_bus); - -+ /* Online the device prior to creating a pmem region, to ensure that -+ * the region is never touched while the device is offline. -+ */ -+ virtio_device_ready(vdev); -+ - ndr_desc.res = &res; - - ndr_desc.numa_node = memory_add_physaddr_to_nid(res.start); -@@ -118,6 +123,7 @@ static int virtio_pmem_probe(struct virtio_device *vdev) - } - return 0; - out_nd: -+ vdev->config->reset(vdev); - virtio_reset_device(vdev); - nvdimm_bus_unregister(vpmem->nvdimm_bus); - out_vq: --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1708-drivers-hv-dxgkrnl-restore-uuid_le_cmp-removed-from-upstream-in-f5b3c341a.patch b/patch/kernel/archive/wsl2-arm64-6.6/1708-drivers-hv-dxgkrnl-restore-uuid_le_cmp-removed-from-upstream-in-f5b3c341a.patch deleted file mode 100644 index 0c89b7798728..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1708-drivers-hv-dxgkrnl-restore-uuid_le_cmp-removed-from-upstream-in-f5b3c341a.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Shradha Gupta -Date: Fri, 30 Sep 2022 08:01:38 +0200 -Subject: drivers: hv: dxgkrnl: restore 
`uuid_le_cmp` removed from upstream in - f5b3c341a - ---- - drivers/hv/dxgkrnl/dxgmodule.c | 6 ++++++ - 1 file changed, 6 insertions(+) - -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -27,6 +27,12 @@ - #undef dev_fmt - #define dev_fmt(fmt) "dxgk: " fmt - -+// Was removed from include/linux/uuid.h in f5b3c341a: "mei: Move uuid_le_cmp() to its only user" -- this would be the 2nd user ;-) -+static inline int uuid_le_cmp(const guid_t u1, const guid_t u2) -+{ -+ return memcmp(&u1, &u2, sizeof(guid_t)); -+} -+ - /* - * Interface from dxgglobal - */ --- -Armbian - diff --git a/patch/kernel/archive/wsl2-arm64-6.6/1709-drivers-hv-dxgkrnl-adapt-dxg_remove_vmbus-to-96ec29396-s-reality-void-return.patch b/patch/kernel/archive/wsl2-arm64-6.6/1709-drivers-hv-dxgkrnl-adapt-dxg_remove_vmbus-to-96ec29396-s-reality-void-return.patch deleted file mode 100644 index dc4a046a5f86..000000000000 --- a/patch/kernel/archive/wsl2-arm64-6.6/1709-drivers-hv-dxgkrnl-adapt-dxg_remove_vmbus-to-96ec29396-s-reality-void-return.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ricardo Pardini -Date: Sun, 26 Nov 2023 13:44:08 +0100 -Subject: drivers: hv: dxgkrnl: adapt dxg_remove_vmbus to 96ec29396's reality - (void return) - ---- - drivers/hv/dxgkrnl/dxgmodule.c | 6 +----- - 1 file changed, 1 insertion(+), 5 deletions(-) - -diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c -index 111111111111..222222222222 100644 ---- a/drivers/hv/dxgkrnl/dxgmodule.c -+++ b/drivers/hv/dxgkrnl/dxgmodule.c -@@ -800,9 +800,8 @@ static int dxg_probe_vmbus(struct hv_device *hdev, - return ret; - } - --static int dxg_remove_vmbus(struct hv_device *hdev) -+static void dxg_remove_vmbus(struct hv_device *hdev) - { -- int ret = 0; - struct dxgvgpuchannel *vgpu_channel; - struct 
dxgglobal *dxgglobal = dxggbl(); - -@@ -827,12 +826,9 @@ static int dxg_remove_vmbus(struct hv_device *hdev) - } else { - /* Unknown device type */ - DXG_ERR("Unknown device type"); -- ret = -ENODEV; - } - - mutex_unlock(&dxgglobal->device_mutex); -- -- return ret; - } - - MODULE_DEVICE_TABLE(vmbus, dxg_vmbus_id_table); --- -Armbian - diff --git a/patch/kernel/archive/wsl2-x86-6.1 b/patch/kernel/archive/wsl2-x86-6.1 deleted file mode 120000 index 7c68d5517bb6..000000000000 --- a/patch/kernel/archive/wsl2-x86-6.1 +++ /dev/null @@ -1 +0,0 @@ -wsl2-arm64-6.1 \ No newline at end of file diff --git a/patch/kernel/archive/wsl2-x86-6.6 b/patch/kernel/archive/wsl2-x86-6.6 deleted file mode 120000 index e8710a96aeb5..000000000000 --- a/patch/kernel/archive/wsl2-x86-6.6 +++ /dev/null @@ -1 +0,0 @@ -wsl2-arm64-6.6 \ No newline at end of file